summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
-rw-r--r--Documentation/devicetree/bindings/arm/qcom.yaml6
-rw-r--r--Documentation/devicetree/bindings/display/panel/abt,y030xx067a.yaml5
-rw-r--r--Documentation/devicetree/bindings/display/panel/advantech,idk-1110wr.yaml4
-rw-r--r--Documentation/devicetree/bindings/display/panel/advantech,idk-2121wr.yaml4
-rw-r--r--Documentation/devicetree/bindings/display/panel/bananapi,s070wv20-ct16.yaml4
-rw-r--r--Documentation/devicetree/bindings/display/panel/dlc,dlc0700yzg-1.yaml4
-rw-r--r--Documentation/devicetree/bindings/display/panel/himax,hx83112a.yaml2
-rw-r--r--Documentation/devicetree/bindings/display/panel/himax,hx8394.yaml4
-rw-r--r--Documentation/devicetree/bindings/display/panel/ilitek,ili9163.yaml4
-rw-r--r--Documentation/devicetree/bindings/display/panel/ilitek,ili9322.yaml3
-rw-r--r--Documentation/devicetree/bindings/display/panel/ilitek,ili9341.yaml6
-rw-r--r--Documentation/devicetree/bindings/display/panel/innolux,ej030na.yaml5
-rw-r--r--Documentation/devicetree/bindings/display/panel/innolux,p097pfg.yaml26
-rw-r--r--Documentation/devicetree/bindings/display/panel/jadard,jd9365da-h3.yaml4
-rw-r--r--Documentation/devicetree/bindings/display/panel/kingdisplay,kd035g6-54nt.yaml5
-rw-r--r--Documentation/devicetree/bindings/display/panel/lgphilips,lb035q02.yaml4
-rw-r--r--Documentation/devicetree/bindings/display/panel/lxd,m9189a.yaml64
-rw-r--r--Documentation/devicetree/bindings/display/panel/mitsubishi,aa104xd12.yaml4
-rw-r--r--Documentation/devicetree/bindings/display/panel/mitsubishi,aa121td01.yaml4
-rw-r--r--Documentation/devicetree/bindings/display/panel/motorola,mot-panel.yaml69
-rw-r--r--Documentation/devicetree/bindings/display/panel/nec,nl8048hl11.yaml4
-rw-r--r--Documentation/devicetree/bindings/display/panel/novatek,nt36523.yaml3
-rw-r--r--Documentation/devicetree/bindings/display/panel/novatek,nt36672a.yaml3
-rw-r--r--Documentation/devicetree/bindings/display/panel/orisetech,otm8009a.yaml4
-rw-r--r--Documentation/devicetree/bindings/display/panel/panel-simple-dsi.yaml2
-rw-r--r--Documentation/devicetree/bindings/display/panel/panel-simple.yaml4
-rw-r--r--Documentation/devicetree/bindings/display/panel/pda,91-00156-a0.yaml4
-rw-r--r--Documentation/devicetree/bindings/display/panel/raydium,rm68200.yaml4
-rw-r--r--Documentation/devicetree/bindings/display/panel/raydium,rm692e5.yaml2
-rw-r--r--Documentation/devicetree/bindings/display/panel/samsung,atna33xc20.yaml4
-rw-r--r--Documentation/devicetree/bindings/display/panel/samsung,ld9040.yaml4
-rw-r--r--Documentation/devicetree/bindings/display/panel/samsung,lms380kf01.yaml6
-rw-r--r--Documentation/devicetree/bindings/display/panel/samsung,lms397kf04.yaml6
-rw-r--r--Documentation/devicetree/bindings/display/panel/samsung,s6d27a1.yaml6
-rw-r--r--Documentation/devicetree/bindings/display/panel/samsung,s6d7aa0.yaml8
-rw-r--r--Documentation/devicetree/bindings/display/panel/samsung,s6e3ha8.yaml4
-rw-r--r--Documentation/devicetree/bindings/display/panel/samsung,s6e63m0.yaml2
-rw-r--r--Documentation/devicetree/bindings/display/panel/sgd,gktw70sdae4se.yaml4
-rw-r--r--Documentation/devicetree/bindings/display/panel/sitronix,st7701.yaml6
-rw-r--r--Documentation/devicetree/bindings/display/panel/sitronix,st7789v.yaml6
-rw-r--r--Documentation/devicetree/bindings/display/panel/sony,acx565akm.yaml4
-rw-r--r--Documentation/devicetree/bindings/display/panel/startek,kd070fhfid015.yaml4
-rw-r--r--Documentation/devicetree/bindings/display/panel/tpo,td.yaml5
-rw-r--r--Documentation/devicetree/bindings/display/panel/visionox,r66451.yaml4
-rw-r--r--Documentation/devicetree/bindings/display/panel/visionox,rm69299.yaml4
-rw-r--r--Documentation/devicetree/bindings/display/panel/visionox,vtdr6130.yaml4
-rw-r--r--Documentation/devicetree/bindings/vendor-prefixes.yaml4
-rw-r--r--MAINTAINERS11
-rw-r--r--drivers/accel/amdxdna/aie2_pci.c53
-rw-r--r--drivers/accel/amdxdna/aie2_pci.h28
-rw-r--r--drivers/accel/amdxdna/amdxdna_pci_drv.c4
-rw-r--r--drivers/accel/ivpu/ivpu_gem.c12
-rw-r--r--drivers/accel/ivpu/ivpu_hw_40xx_reg.h6
-rw-r--r--drivers/accel/ivpu/ivpu_hw_ip.c1
-rw-r--r--drivers/dma-buf/heaps/cma_heap.c7
-rw-r--r--drivers/gpu/buddy.c272
-rw-r--r--drivers/gpu/drm/amd/amdgpu/Makefile2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c54
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c53
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.h4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_dev_coredump.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_device.c14
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c529
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.h4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_display.c21
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_mes.h6
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h18
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c7
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c16
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_userq.c135
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c146
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h15
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgv_sriovmsg.h3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/dce_v10_0.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/dce_v6_0.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/dce_v8_0.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v12_1.c107
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gmc_v12_1.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/ih_v7_0.c36
-rw-r--r--drivers/gpu/drm/amd/amdgpu/lsdma_v7_1.c99
-rw-r--r--drivers/gpu/drm/amd/amdgpu/lsdma_v7_1.h31
-rw-r--r--drivers/gpu/drm/amd/amdgpu/mes_v12_0.c5
-rw-r--r--drivers/gpu/drm/amd/amdgpu/mes_v12_1.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/soc24.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/soc_v1_0.c50
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c7
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h2
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.h3
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c7
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c3
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_process.c4
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c9
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c2
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c6
-rw-r--r--drivers/gpu/drm/amd/display/dc/bios/bios_parser.c11
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c16
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr.c1
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn42/dcn42_clk_mgr.c301
-rw-r--r--drivers/gpu/drm/amd/display/dc/clk_mgr/dcn42/dcn42_clk_mgr.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc.c134
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_resource.c1
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_stream.c41
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_surface.c9
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc.h61
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_bios_types.h3
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_dp_types.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_plane.h1
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_spl_translate.c1
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_trace.h4
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc_types.h26
-rw-r--r--drivers/gpu/drm/amd/display/dc/dccg/dcn20/dcn20_dccg.h9
-rw-r--r--drivers/gpu/drm/amd/display/dc/dccg/dcn21/dcn21_dccg.c21
-rw-r--r--drivers/gpu/drm/amd/display/dc/dccg/dcn301/dcn301_dccg.h8
-rw-r--r--drivers/gpu/drm/amd/display/dc/dccg/dcn31/dcn31_dccg.h5
-rw-r--r--drivers/gpu/drm/amd/display/dc/dccg/dcn314/dcn314_dccg.h5
-rw-r--r--drivers/gpu/drm/amd/display/dc/dccg/dcn401/dcn401_dccg.c20
-rw-r--r--drivers/gpu/drm/amd/display/dc/dccg/dcn42/dcn42_dccg.c59
-rw-r--r--drivers/gpu/drm/amd/display/dc/dccg/dcn42/dcn42_dccg.h11
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dmub_hw_lock_mgr.c16
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dmub_hw_lock_mgr.h31
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2_0/Makefile3
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2_0/display_mode_core.c4
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2_0/dml21/dml21_translation_helper.c56
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2_0/dml21/dml21_translation_helper.h1
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2_0/dml21/dml21_utils.c1
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2_0/dml21/dml21_wrapper.c15
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2_0/dml21/inc/bounding_boxes/dcn42_soc_bb.h263
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2_0/dml21/inc/dml_top_display_cfg_types.h13
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2_0/dml21/inc/dml_top_types.h3
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_core/dml2_core_dcn4.c111
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_core/dml2_core_dcn4.h1
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_core/dml2_core_factory.c9
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_core/dml2_core_utils.c63
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_core/dml2_core_utils.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_dpmm/dml2_dpmm_dcn4.c33
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_dpmm/dml2_dpmm_dcn4.h1
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_dpmm/dml2_dpmm_factory.c6
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_mcg/dml2_mcg_dcn4.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_mcg/dml2_mcg_dcn42.c76
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_mcg/dml2_mcg_dcn42.h12
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_mcg/dml2_mcg_factory.c6
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_pmo/dml2_pmo_dcn4_fams2.c6
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_pmo/dml2_pmo_factory.c4
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_pmo/dml2_pmo_factory.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_top/dml2_top_interfaces.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_top/dml2_top_legacy.c1
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/inc/dml2_internal_shared_types.h3
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2_0/dml2_mall_phantom.c41
-rw-r--r--drivers/gpu/drm/amd/display/dc/dml2_0/dml2_wrapper.c3
-rw-r--r--drivers/gpu/drm/amd/display/dc/dpp/dcn10/dcn10_dpp.c8
-rw-r--r--drivers/gpu/drm/amd/display/dc/dpp/dcn30/dcn30_dpp.c8
-rw-r--r--drivers/gpu/drm/amd/display/dc/dpp/dcn30/dcn30_dpp_cm.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/dpp/dcn401/dcn401_dpp_dscl.c23
-rw-r--r--drivers/gpu/drm/amd/display/dc/hpo/dcn31/dcn31_hpo_dp_stream_encoder.c5
-rw-r--r--drivers/gpu/drm/amd/display/dc/hubbub/dcn401/dcn401_hubbub.c4
-rw-r--r--drivers/gpu/drm/amd/display/dc/hubp/dcn401/dcn401_hubp.c201
-rw-r--r--drivers/gpu/drm/amd/display/dc/hubp/dcn401/dcn401_hubp.h23
-rw-r--r--drivers/gpu/drm/amd/display/dc/hubp/dcn42/dcn42_hubp.c85
-rw-r--r--drivers/gpu/drm/amd/display/dc/hubp/dcn42/dcn42_hubp.h16
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.c158
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_hwseq.c32
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_hwseq.c1
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c12
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.c421
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/dcn42/dcn42_hwseq.c553
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/dcn42/dcn42_hwseq.h10
-rw-r--r--drivers/gpu/drm/amd/display/dc/hwss/dcn42/dcn42_init.c8
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h2
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr_internal.h73
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h36
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h18
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/hw/mpc.h112
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/resource.h1
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/link_detection.c12
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_irq_handler.c36
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_panel_replay.c23
-rw-r--r--drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.c3
-rw-r--r--drivers/gpu/drm/amd/display/dc/mpc/dcn10/dcn10_mpc.c4
-rw-r--r--drivers/gpu/drm/amd/display/dc/mpc/dcn401/dcn401_mpc.c177
-rw-r--r--drivers/gpu/drm/amd/display/dc/mpc/dcn401/dcn401_mpc.h25
-rw-r--r--drivers/gpu/drm/amd/display/dc/mpc/dcn42/dcn42_mpc.c390
-rw-r--r--drivers/gpu/drm/amd/display/dc/mpc/dcn42/dcn42_mpc.h50
-rw-r--r--drivers/gpu/drm/amd/display/dc/optc/dcn10/dcn10_optc.h8
-rw-r--r--drivers/gpu/drm/amd/display/dc/optc/dcn31/dcn31_optc.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/optc/dcn42/dcn42_optc.c105
-rw-r--r--drivers/gpu/drm/amd/display/dc/optc/dcn42/dcn42_optc.h13
-rw-r--r--drivers/gpu/drm/amd/display/dc/resource/dcn20/dcn20_resource.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/resource/dcn42/dcn42_resource.c10
-rw-r--r--drivers/gpu/drm/amd/display/dc/soc_and_ip_translator/Makefile3
-rw-r--r--drivers/gpu/drm/amd/display/dc/soc_and_ip_translator/dcn401/dcn401_soc_and_ip_translator.c3
-rw-r--r--drivers/gpu/drm/amd/display/dc/soc_and_ip_translator/dcn42/dcn42_soc_and_ip_translator.c163
-rw-r--r--drivers/gpu/drm/amd/display/dc/soc_and_ip_translator/dcn42/dcn42_soc_and_ip_translator.h1
-rw-r--r--drivers/gpu/drm/amd/display/dc/soc_and_ip_translator/soc_and_ip_translator.c4
-rw-r--r--drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h71
-rw-r--r--drivers/gpu/drm/amd/display/include/dpcd_defs.h3
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/clk/clk_15_0_0_offset.h44
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/clk/clk_15_0_0_sh_mask.h52
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_4_2_0_offset.h2
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_4_2_0_sh_mask.h9
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/lsdma/lsdma_7_1_0_offset.h44
-rw-r--r--drivers/gpu/drm/amd/include/asic_reg/lsdma/lsdma_7_1_0_sh_mask.h105
-rw-r--r--drivers/gpu/drm/amd/include/discovery.h13
-rw-r--r--drivers/gpu/drm/amd/include/kgd_pp_interface.h3
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c3
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_12_ppt.c42
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c3
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.h13
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c3
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.c3
-rw-r--r--drivers/gpu/drm/bridge/analogix/Kconfig3
-rw-r--r--drivers/gpu/drm/bridge/analogix/anx7625.c41
-rw-r--r--drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-core.c257
-rw-r--r--drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-core.h2
-rw-r--r--drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-hdcp.c18
-rw-r--r--drivers/gpu/drm/drm_fb_helper.c30
-rw-r--r--drivers/gpu/drm/drm_gem_shmem_helper.c79
-rw-r--r--drivers/gpu/drm/i915/Makefile7
-rw-r--r--drivers/gpu/drm/i915/display/g4x_dp.c1
-rw-r--r--drivers/gpu/drm/i915/display/g4x_hdmi.c1
-rw-r--r--drivers/gpu/drm/i915/display/hsw_ips.c10
-rw-r--r--drivers/gpu/drm/i915/display/i9xx_plane.c1
-rw-r--r--drivers/gpu/drm/i915/display/i9xx_wm.c10
-rw-r--r--drivers/gpu/drm/i915/display/icl_dsi.c7
-rw-r--r--drivers/gpu/drm/i915/display/intel_alpm.c150
-rw-r--r--drivers/gpu/drm/i915/display/intel_alpm.h9
-rw-r--r--drivers/gpu/drm/i915/display/intel_audio.c24
-rw-r--r--drivers/gpu/drm/i915/display/intel_backlight.c1
-rw-r--r--drivers/gpu/drm/i915/display/intel_bios.c31
-rw-r--r--drivers/gpu/drm/i915/display/intel_bo.c78
-rw-r--r--drivers/gpu/drm/i915/display/intel_bo.h14
-rw-r--r--drivers/gpu/drm/i915/display/intel_bw.c24
-rw-r--r--drivers/gpu/drm/i915/display/intel_casf.c7
-rw-r--r--drivers/gpu/drm/i915/display/intel_cdclk.c63
-rw-r--r--drivers/gpu/drm/i915/display/intel_crt.c18
-rw-r--r--drivers/gpu/drm/i915/display/intel_crt_regs.h2
-rw-r--r--drivers/gpu/drm/i915/display/intel_crtc.c4
-rw-r--r--drivers/gpu/drm/i915/display/intel_cursor.c3
-rw-r--r--drivers/gpu/drm/i915/display/intel_cx0_phy.c741
-rw-r--r--drivers/gpu/drm/i915/display/intel_cx0_phy.h5
-rw-r--r--drivers/gpu/drm/i915/display/intel_cx0_phy_regs.h4
-rw-r--r--drivers/gpu/drm/i915/display/intel_ddi.c13
-rw-r--r--drivers/gpu/drm/i915/display/intel_de.h17
-rw-r--r--drivers/gpu/drm/i915/display/intel_display.c151
-rw-r--r--drivers/gpu/drm/i915/display/intel_display.h27
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_debugfs.c2
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_device.c10
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_device.h1
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_driver.c22
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_irq.c11
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_limits.h26
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_power.c29
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_power_map.c13
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_power_well.c74
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_power_well.h2
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_regs.h268
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_rps.c2
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_snapshot.c4
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_types.h12
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_wa.c83
-rw-r--r--drivers/gpu/drm/i915/display/intel_display_wa.h28
-rw-r--r--drivers/gpu/drm/i915/display/intel_dmc.c4
-rw-r--r--drivers/gpu/drm/i915/display/intel_dp.c869
-rw-r--r--drivers/gpu/drm/i915/display/intel_dp.h20
-rw-r--r--drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c53
-rw-r--r--drivers/gpu/drm/i915/display/intel_dp_mst.c209
-rw-r--r--drivers/gpu/drm/i915/display/intel_dp_tunnel.c96
-rw-r--r--drivers/gpu/drm/i915/display/intel_dpll.c8
-rw-r--r--drivers/gpu/drm/i915/display/intel_dpll.h1
-rw-r--r--drivers/gpu/drm/i915/display/intel_dpll_mgr.c11
-rw-r--r--drivers/gpu/drm/i915/display/intel_dpll_mgr.h3
-rw-r--r--drivers/gpu/drm/i915/display/intel_dpt.c316
-rw-r--r--drivers/gpu/drm/i915/display/intel_dpt.h22
-rw-r--r--drivers/gpu/drm/i915/display/intel_dpt_common.c35
-rw-r--r--drivers/gpu/drm/i915/display/intel_dpt_common.h13
-rw-r--r--drivers/gpu/drm/i915/display/intel_dram.c9
-rw-r--r--drivers/gpu/drm/i915/display/intel_dsb.c107
-rw-r--r--drivers/gpu/drm/i915/display/intel_dsb_buffer.h22
-rw-r--r--drivers/gpu/drm/i915/display/intel_fb.c41
-rw-r--r--drivers/gpu/drm/i915/display/intel_fb_bo.c101
-rw-r--r--drivers/gpu/drm/i915/display/intel_fb_bo.h25
-rw-r--r--drivers/gpu/drm/i915/display/intel_fb_pin.c15
-rw-r--r--drivers/gpu/drm/i915/display/intel_fbc.c17
-rw-r--r--drivers/gpu/drm/i915/display/intel_fbdev.c5
-rw-r--r--drivers/gpu/drm/i915/display/intel_fbdev_fb.c38
-rw-r--r--drivers/gpu/drm/i915/display/intel_fbdev_fb.h1
-rw-r--r--drivers/gpu/drm/i915/display/intel_fdi.c1
-rw-r--r--drivers/gpu/drm/i915/display/intel_fifo_underrun.c1
-rw-r--r--drivers/gpu/drm/i915/display/intel_flipq.c13
-rw-r--r--drivers/gpu/drm/i915/display/intel_frontbuffer.c37
-rw-r--r--drivers/gpu/drm/i915/display/intel_frontbuffer.h21
-rw-r--r--drivers/gpu/drm/i915/display/intel_gmbus.c7
-rw-r--r--drivers/gpu/drm/i915/display/intel_hdcp.c18
-rw-r--r--drivers/gpu/drm/i915/display/intel_hdmi.c19
-rw-r--r--drivers/gpu/drm/i915/display/intel_hotplug_irq.c1
-rw-r--r--drivers/gpu/drm/i915/display/intel_lt_phy.c245
-rw-r--r--drivers/gpu/drm/i915/display/intel_lt_phy.h5
-rw-r--r--drivers/gpu/drm/i915/display/intel_lt_phy_regs.h8
-rw-r--r--drivers/gpu/drm/i915/display/intel_modeset_setup.c4
-rw-r--r--drivers/gpu/drm/i915/display/intel_oprom_regs.h15
-rw-r--r--drivers/gpu/drm/i915/display/intel_overlay.c584
-rw-r--r--drivers/gpu/drm/i915/display/intel_overlay.h48
-rw-r--r--drivers/gpu/drm/i915/display/intel_parent.c156
-rw-r--r--drivers/gpu/drm/i915/display/intel_parent.h51
-rw-r--r--drivers/gpu/drm/i915/display/intel_pch_display.c1
-rw-r--r--drivers/gpu/drm/i915/display/intel_pch_refclk.c1
-rw-r--r--drivers/gpu/drm/i915/display/intel_pmdemand.c8
-rw-r--r--drivers/gpu/drm/i915/display/intel_pps.c1
-rw-r--r--drivers/gpu/drm/i915/display/intel_psr.c117
-rw-r--r--drivers/gpu/drm/i915/display/intel_psr.h1
-rw-r--r--drivers/gpu/drm/i915/display/intel_rom.c3
-rw-r--r--drivers/gpu/drm/i915/display/intel_snps_hdmi_pll.c2
-rw-r--r--drivers/gpu/drm/i915/display/intel_tc.c5
-rw-r--r--drivers/gpu/drm/i915/display/intel_vbt_defs.h1
-rw-r--r--drivers/gpu/drm/i915/display/intel_vdsc.c96
-rw-r--r--drivers/gpu/drm/i915/display/intel_vdsc.h12
-rw-r--r--drivers/gpu/drm/i915/display/intel_vdsc_regs.h12
-rw-r--r--drivers/gpu/drm/i915/display/intel_vga.c315
-rw-r--r--drivers/gpu/drm/i915/display/intel_vga.h5
-rw-r--r--drivers/gpu/drm/i915/display/intel_vrr.c55
-rw-r--r--drivers/gpu/drm/i915/display/intel_vrr.h1
-rw-r--r--drivers/gpu/drm/i915/display/intel_vrr_regs.h1
-rw-r--r--drivers/gpu/drm/i915/display/skl_scaler.c2
-rw-r--r--drivers/gpu/drm/i915/display/skl_universal_plane.c7
-rw-r--r--drivers/gpu/drm/i915/display/skl_watermark.c26
-rw-r--r--drivers/gpu/drm/i915/display/vlv_dsi.c1
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_clflush.c2
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_domain.c6
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_object.c30
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_object_frontbuffer.c69
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_object_frontbuffer.h24
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_phys.c4
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_shmem.c12
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_ttm_move.c2
-rw-r--r--drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c27
-rw-r--r--drivers/gpu/drm/i915/gt/gen6_ppgtt.c2
-rw-r--r--drivers/gpu/drm/i915/gt/intel_engine_cs.c13
-rw-r--r--drivers/gpu/drm/i915/gt/intel_engine_pm.c2
-rw-r--r--drivers/gpu/drm/i915/gt/intel_execlists_submission.c6
-rw-r--r--drivers/gpu/drm/i915/gt/intel_ggtt_fencing.c7
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gt.c1
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gt_irq.c2
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gt_pm_debugfs.c1
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gt_regs.h11
-rw-r--r--drivers/gpu/drm/i915/gt/intel_gt_sysfs_pm.c2
-rw-r--r--drivers/gpu/drm/i915/gt/intel_llc.c2
-rw-r--r--drivers/gpu/drm/i915/gt/intel_lrc.c21
-rw-r--r--drivers/gpu/drm/i915/gt/intel_rc6.c24
-rw-r--r--drivers/gpu/drm/i915/gt/intel_reset.c4
-rw-r--r--drivers/gpu/drm/i915/gt/intel_ring_submission.c21
-rw-r--r--drivers/gpu/drm/i915/gt/intel_rps.c1
-rw-r--r--drivers/gpu/drm/i915/gt/intel_workarounds.c26
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_guc.c4
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c4
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c10
-rw-r--r--drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c4
-rw-r--r--drivers/gpu/drm/i915/gt/uc/selftest_guc.c2
-rw-r--r--drivers/gpu/drm/i915/gt/uc/selftest_guc_multi_lrc.c2
-rw-r--r--drivers/gpu/drm/i915/gvt/cmd_parser.c1
-rw-r--r--drivers/gpu/drm/i915/gvt/display.c1
-rw-r--r--drivers/gpu/drm/i915/gvt/handlers.c50
-rw-r--r--drivers/gpu/drm/i915/gvt/interrupt.c1
-rw-r--r--drivers/gpu/drm/i915/gvt/mmio_context.c3
-rw-r--r--drivers/gpu/drm/i915/gvt/reg.h4
-rw-r--r--drivers/gpu/drm/i915/i915_active.c2
-rw-r--r--drivers/gpu/drm/i915/i915_bo.c156
-rw-r--r--drivers/gpu/drm/i915/i915_bo.h9
-rw-r--r--drivers/gpu/drm/i915/i915_debugfs.c1
-rw-r--r--drivers/gpu/drm/i915/i915_dpt.c277
-rw-r--r--drivers/gpu/drm/i915/i915_dpt.h20
-rw-r--r--drivers/gpu/drm/i915/i915_driver.c32
-rw-r--r--drivers/gpu/drm/i915/i915_drv.h5
-rw-r--r--drivers/gpu/drm/i915/i915_dsb_buffer.c (renamed from drivers/gpu/drm/i915/display/intel_dsb_buffer.c)28
-rw-r--r--drivers/gpu/drm/i915/i915_dsb_buffer.h9
-rw-r--r--drivers/gpu/drm/i915/i915_gem.c6
-rw-r--r--drivers/gpu/drm/i915/i915_gpu_error.c6
-rw-r--r--drivers/gpu/drm/i915/i915_gpu_error.h1
-rw-r--r--drivers/gpu/drm/i915/i915_hwmon.c2
-rw-r--r--drivers/gpu/drm/i915/i915_initial_plane.c3
-rw-r--r--drivers/gpu/drm/i915/i915_irq.c1
-rw-r--r--drivers/gpu/drm/i915/i915_overlay.c517
-rw-r--r--drivers/gpu/drm/i915/i915_overlay.h11
-rw-r--r--drivers/gpu/drm/i915/i915_perf.c34
-rw-r--r--drivers/gpu/drm/i915/i915_reg.h465
-rw-r--r--drivers/gpu/drm/i915/i915_reg_defs.h179
-rw-r--r--drivers/gpu/drm/i915/i915_sw_fence_work.c2
-rw-r--r--drivers/gpu/drm/i915/i915_vma.c10
-rw-r--r--drivers/gpu/drm/i915/i915_vma.h7
-rw-r--r--drivers/gpu/drm/i915/i915_vma_resource.c2
-rw-r--r--drivers/gpu/drm/i915/intel_clock_gating.c42
-rw-r--r--drivers/gpu/drm/i915/intel_gvt_mmio_table.c10
-rw-r--r--drivers/gpu/drm/i915/intel_pcode.c17
-rw-r--r--drivers/gpu/drm/i915/intel_pcode.h9
-rw-r--r--drivers/gpu/drm/i915/intel_uncore.c4
-rw-r--r--drivers/gpu/drm/i915/pxp/intel_pxp.c6
-rw-r--r--drivers/gpu/drm/i915/pxp/intel_pxp_irq.c2
-rw-r--r--drivers/gpu/drm/i915/selftests/i915_sw_fence.c2
-rw-r--r--drivers/gpu/drm/i915/selftests/igt_mmap.c41
-rw-r--r--drivers/gpu/drm/i915/selftests/igt_mmap.h8
-rw-r--r--drivers/gpu/drm/i915/selftests/mock_gem_device.c2
-rw-r--r--drivers/gpu/drm/i915/vlv_suspend.c1
-rw-r--r--drivers/gpu/drm/imagination/pvr_gem.c6
-rw-r--r--drivers/gpu/drm/loongson/lsdc_drv.c3
-rw-r--r--drivers/gpu/drm/loongson/lsdc_gem.c15
-rw-r--r--drivers/gpu/drm/loongson/lsdc_gem.h4
-rw-r--r--drivers/gpu/drm/mxsfb/lcdif_drv.c8
-rw-r--r--drivers/gpu/drm/panel/Kconfig32
-rw-r--r--drivers/gpu/drm/panel/Makefile3
-rw-r--r--drivers/gpu/drm/panel/panel-ilitek-ili9882t.c14
-rw-r--r--drivers/gpu/drm/panel/panel-jadard-jd9365da-h3.c244
-rw-r--r--drivers/gpu/drm/panel/panel-lxd-m9189a.c243
-rw-r--r--drivers/gpu/drm/panel/panel-mantix-mlaf057we51.c15
-rw-r--r--drivers/gpu/drm/panel/panel-motorola-mot.c244
-rw-r--r--drivers/gpu/drm/panel/panel-novatek-nt37700f.c299
-rw-r--r--drivers/gpu/drm/panel/panel-renesas-r61307.c40
-rw-r--r--drivers/gpu/drm/panel/panel-renesas-r69328.c38
-rw-r--r--drivers/gpu/drm/panel/panel-simple.c92
-rw-r--r--drivers/gpu/drm/panthor/panthor_sched.c2
-rw-r--r--drivers/gpu/drm/radeon/radeon_device.c7
-rw-r--r--drivers/gpu/drm/radeon/radeon_fbdev.c17
-rw-r--r--drivers/gpu/drm/radeon/radeon_mode.h5
-rw-r--r--drivers/gpu/drm/ttm/ttm_bo.c6
-rw-r--r--drivers/gpu/drm/vc4/vc4_hdmi.c14
-rw-r--r--drivers/gpu/drm/xe/Makefile9
-rw-r--r--drivers/gpu/drm/xe/compat-i915-headers/i915_reg.h6
-rw-r--r--drivers/gpu/drm/xe/compat-i915-headers/i915_vma.h36
-rw-r--r--drivers/gpu/drm/xe/compat-i915-headers/intel_pcode.h11
-rw-r--r--drivers/gpu/drm/xe/compat-i915-headers/intel_uncore.h9
-rw-r--r--drivers/gpu/drm/xe/display/intel_bo.c109
-rw-r--r--drivers/gpu/drm/xe/display/intel_fbdev_fb.c27
-rw-r--r--drivers/gpu/drm/xe/display/xe_display.c8
-rw-r--r--drivers/gpu/drm/xe/display/xe_display_bo.c (renamed from drivers/gpu/drm/xe/display/intel_fb_bo.c)63
-rw-r--r--drivers/gpu/drm/xe/display/xe_display_bo.h9
-rw-r--r--drivers/gpu/drm/xe/display/xe_display_pcode.c38
-rw-r--r--drivers/gpu/drm/xe/display/xe_display_pcode.h9
-rw-r--r--drivers/gpu/drm/xe/display/xe_display_vma.h18
-rw-r--r--drivers/gpu/drm/xe/display/xe_dsb_buffer.c28
-rw-r--r--drivers/gpu/drm/xe/display/xe_dsb_buffer.h9
-rw-r--r--drivers/gpu/drm/xe/display/xe_fb_pin.c26
-rw-r--r--drivers/gpu/drm/xe/display/xe_frontbuffer.c71
-rw-r--r--drivers/gpu/drm/xe/display/xe_frontbuffer.h9
-rw-r--r--drivers/gpu/drm/xe/display/xe_initial_plane.c27
-rw-r--r--drivers/gpu/drm/xe/regs/xe_reg_defs.h5
-rw-r--r--drivers/gpu/drm/xe/xe_eu_stall.c20
-rw-r--r--drivers/gpu/drm/xe/xe_execlist.c6
-rw-r--r--drivers/gpu/drm/xe/xe_hw_engine.c8
-rw-r--r--drivers/gpu/drm/xe/xe_lrc.c14
-rw-r--r--drivers/gpu/drm/xe/xe_mmio.c9
-rw-r--r--drivers/gpu/drm/xe/xe_mmio.h1
-rw-r--r--drivers/gpu/drm/xe/xe_oa.c42
-rw-r--r--drivers/gpu/drm/xe/xe_pcode.c30
-rw-r--r--drivers/gpu/drm/xe/xe_pcode.h8
-rw-r--r--drivers/gpu/drm/xe/xe_pxp.c4
-rw-r--r--drivers/gpu/drm/xe/xe_uc_fw.c4
-rw-r--r--drivers/gpu/tests/gpu_buddy_test.c493
-rw-r--r--include/drm/drm_fb_helper.h2
-rw-r--r--include/drm/intel/display_parent_interface.h108
-rw-r--r--include/drm/intel/i915_drm.h82
-rw-r--r--include/drm/intel/intel_gmd_interrupt_regs.h92
-rw-r--r--include/drm/intel/intel_gmd_misc_regs.h21
-rw-r--r--include/drm/intel/intel_pcode_regs.h108
-rw-r--r--include/drm/intel/pick.h51
-rw-r--r--include/drm/intel/reg_bits.h139
-rw-r--r--include/linux/gpu_buddy.h2
-rw-r--r--include/linux/iopoll.h8
-rw-r--r--include/uapi/drm/amdxdna_accel.h3
-rw-r--r--include/video/vga.h1
471 files changed, 11872 insertions, 6964 deletions
diff --git a/Documentation/devicetree/bindings/arm/qcom.yaml b/Documentation/devicetree/bindings/arm/qcom.yaml
index d48c625d3fc4..1a955d1b14bf 100644
--- a/Documentation/devicetree/bindings/arm/qcom.yaml
+++ b/Documentation/devicetree/bindings/arm/qcom.yaml
@@ -846,6 +846,12 @@ properties:
- items:
- enum:
+ - google,bonito-tianma
+ - const: google,bonito
+ - const: qcom,sdm670
+
+ - items:
+ - enum:
- qcom,sdx55-mtp
- qcom,sdx55-telit-fn980-tlb
- qcom,sdx55-t55
diff --git a/Documentation/devicetree/bindings/display/panel/abt,y030xx067a.yaml b/Documentation/devicetree/bindings/display/panel/abt,y030xx067a.yaml
index 0aa2d3fbadaa..72cbb9ee5eae 100644
--- a/Documentation/devicetree/bindings/display/panel/abt,y030xx067a.yaml
+++ b/Documentation/devicetree/bindings/display/panel/abt,y030xx067a.yaml
@@ -20,11 +20,6 @@ properties:
reg:
maxItems: 1
- backlight: true
- port: true
- power-supply: true
- reset-gpios: true
-
required:
- compatible
- reg
diff --git a/Documentation/devicetree/bindings/display/panel/advantech,idk-1110wr.yaml b/Documentation/devicetree/bindings/display/panel/advantech,idk-1110wr.yaml
index f6fea9085aab..76b48836ddf6 100644
--- a/Documentation/devicetree/bindings/display/panel/advantech,idk-1110wr.yaml
+++ b/Documentation/devicetree/bindings/display/panel/advantech,idk-1110wr.yaml
@@ -41,8 +41,6 @@ properties:
panel-timing: true
port: true
-additionalProperties: false
-
required:
- compatible
- data-mapping
@@ -51,6 +49,8 @@ required:
- panel-timing
- port
+additionalProperties: false
+
examples:
- |+
panel {
diff --git a/Documentation/devicetree/bindings/display/panel/advantech,idk-2121wr.yaml b/Documentation/devicetree/bindings/display/panel/advantech,idk-2121wr.yaml
index 05ca3b2385f8..c9b066e69e2f 100644
--- a/Documentation/devicetree/bindings/display/panel/advantech,idk-2121wr.yaml
+++ b/Documentation/devicetree/bindings/display/panel/advantech,idk-2121wr.yaml
@@ -56,8 +56,6 @@ properties:
- port@0
- port@1
-additionalProperties: false
-
required:
- compatible
- width-mm
@@ -65,6 +63,8 @@ required:
- data-mapping
- panel-timing
+additionalProperties: false
+
examples:
- |+
panel-lvds {
diff --git a/Documentation/devicetree/bindings/display/panel/bananapi,s070wv20-ct16.yaml b/Documentation/devicetree/bindings/display/panel/bananapi,s070wv20-ct16.yaml
index bbf127fb28f7..46e7cff5b2fa 100644
--- a/Documentation/devicetree/bindings/display/panel/bananapi,s070wv20-ct16.yaml
+++ b/Documentation/devicetree/bindings/display/panel/bananapi,s070wv20-ct16.yaml
@@ -22,10 +22,10 @@ properties:
enable-gpios: true
port: true
-additionalProperties: false
-
required:
- compatible
- power-supply
+additionalProperties: false
+
...
diff --git a/Documentation/devicetree/bindings/display/panel/dlc,dlc0700yzg-1.yaml b/Documentation/devicetree/bindings/display/panel/dlc,dlc0700yzg-1.yaml
index 287e2feb6533..9a2c532dbc92 100644
--- a/Documentation/devicetree/bindings/display/panel/dlc,dlc0700yzg-1.yaml
+++ b/Documentation/devicetree/bindings/display/panel/dlc,dlc0700yzg-1.yaml
@@ -22,10 +22,10 @@ properties:
backlight: true
port: true
-additionalProperties: false
-
required:
- compatible
- power-supply
+additionalProperties: false
+
...
diff --git a/Documentation/devicetree/bindings/display/panel/himax,hx83112a.yaml b/Documentation/devicetree/bindings/display/panel/himax,hx83112a.yaml
index 56bcd152f43c..2c60d0cd704e 100644
--- a/Documentation/devicetree/bindings/display/panel/himax,hx83112a.yaml
+++ b/Documentation/devicetree/bindings/display/panel/himax,hx83112a.yaml
@@ -33,8 +33,6 @@ properties:
vsp-supply:
description: Negative source voltage rail
- port: true
-
required:
- compatible
- reg
diff --git a/Documentation/devicetree/bindings/display/panel/himax,hx8394.yaml b/Documentation/devicetree/bindings/display/panel/himax,hx8394.yaml
index 5725a587e35c..182a2b825e1c 100644
--- a/Documentation/devicetree/bindings/display/panel/himax,hx8394.yaml
+++ b/Documentation/devicetree/bindings/display/panel/himax,hx8394.yaml
@@ -54,8 +54,6 @@ required:
- vcc-supply
- iovcc-supply
-additionalProperties: false
-
allOf:
- $ref: panel-common.yaml#
- if:
@@ -68,6 +66,8 @@ allOf:
required:
- reset-gpios
+additionalProperties: false
+
examples:
- |
#include <dt-bindings/gpio/gpio.h>
diff --git a/Documentation/devicetree/bindings/display/panel/ilitek,ili9163.yaml b/Documentation/devicetree/bindings/display/panel/ilitek,ili9163.yaml
index ef5a2240b684..cc80d0e90f1a 100644
--- a/Documentation/devicetree/bindings/display/panel/ilitek,ili9163.yaml
+++ b/Documentation/devicetree/bindings/display/panel/ilitek,ili9163.yaml
@@ -34,10 +34,6 @@ properties:
maxItems: 1
description: Display data/command selection (D/CX)
- backlight: true
- reset-gpios: true
- rotation: true
-
required:
- compatible
- reg
diff --git a/Documentation/devicetree/bindings/display/panel/ilitek,ili9322.yaml b/Documentation/devicetree/bindings/display/panel/ilitek,ili9322.yaml
index 4bdc33d12306..c97bfd0f2ebc 100644
--- a/Documentation/devicetree/bindings/display/panel/ilitek,ili9322.yaml
+++ b/Documentation/devicetree/bindings/display/panel/ilitek,ili9322.yaml
@@ -29,9 +29,6 @@ properties:
reg:
maxItems: 1
- reset-gpios: true
- port: true
-
vcc-supply:
description: Core voltage supply
diff --git a/Documentation/devicetree/bindings/display/panel/ilitek,ili9341.yaml b/Documentation/devicetree/bindings/display/panel/ilitek,ili9341.yaml
index 5f41758c96d5..aeb7cb26c058 100644
--- a/Documentation/devicetree/bindings/display/panel/ilitek,ili9341.yaml
+++ b/Documentation/devicetree/bindings/display/panel/ilitek,ili9341.yaml
@@ -40,8 +40,6 @@ properties:
spi-max-frequency:
const: 10000000
- port: true
-
vci-supply:
description: Analog voltage supply (2.5 .. 3.3V)
@@ -51,8 +49,6 @@ properties:
vddi-led-supply:
description: Voltage supply for the LED driver (1.65 .. 3.3 V)
-unevaluatedProperties: false
-
required:
- compatible
- reg
@@ -68,6 +64,8 @@ then:
required:
- port
+unevaluatedProperties: false
+
examples:
- |+
#include <dt-bindings/gpio/gpio.h>
diff --git a/Documentation/devicetree/bindings/display/panel/innolux,ej030na.yaml b/Documentation/devicetree/bindings/display/panel/innolux,ej030na.yaml
index c7df9a7f6589..59cc7edb22bb 100644
--- a/Documentation/devicetree/bindings/display/panel/innolux,ej030na.yaml
+++ b/Documentation/devicetree/bindings/display/panel/innolux,ej030na.yaml
@@ -20,11 +20,6 @@ properties:
reg:
maxItems: 1
- backlight: true
- port: true
- power-supply: true
- reset-gpios: true
-
required:
- compatible
- reg
diff --git a/Documentation/devicetree/bindings/display/panel/innolux,p097pfg.yaml b/Documentation/devicetree/bindings/display/panel/innolux,p097pfg.yaml
index 4164e3f7061d..7c75e01797f6 100644
--- a/Documentation/devicetree/bindings/display/panel/innolux,p097pfg.yaml
+++ b/Documentation/devicetree/bindings/display/panel/innolux,p097pfg.yaml
@@ -10,7 +10,7 @@ maintainers:
- Lin Huang <hl@rock-chips.com>
allOf:
- - $ref: panel-common.yaml#
+ - $ref: panel-common-dual.yaml#
properties:
compatible:
@@ -28,6 +28,9 @@ properties:
avee-supply:
description: The regulator that provides negative voltage
+ port: true
+ ports: true
+
required:
- compatible
- reg
@@ -52,6 +55,27 @@ examples:
avee-supply = <&avee>;
backlight = <&backlight>;
enable-gpios = <&gpio1 13 GPIO_ACTIVE_HIGH>;
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+
+ mipi_in_panel: endpoint {
+ remote-endpoint = <&mipi_out_panel>;
+ };
+ };
+
+ port@1 {
+ reg = <1>;
+
+ mipi1_in_panel: endpoint {
+ remote-endpoint = <&mipi1_out_panel>;
+ };
+ };
+ };
};
};
diff --git a/Documentation/devicetree/bindings/display/panel/jadard,jd9365da-h3.yaml b/Documentation/devicetree/bindings/display/panel/jadard,jd9365da-h3.yaml
index 5802fb3c9ffe..e39efb44ed42 100644
--- a/Documentation/devicetree/bindings/display/panel/jadard,jd9365da-h3.yaml
+++ b/Documentation/devicetree/bindings/display/panel/jadard,jd9365da-h3.yaml
@@ -23,6 +23,7 @@ properties:
- melfas,lmfbx101117480
- radxa,display-10hd-ad001
- radxa,display-8hd-ad002
+ - taiguanck,xti05101-01a
- const: jadard,jd9365da-h3
reg:
@@ -35,9 +36,8 @@ properties:
description: supply regulator for VCCIO, usually 1.8V
reset-gpios: true
-
backlight: true
-
+ rotation: true
port: true
required:
diff --git a/Documentation/devicetree/bindings/display/panel/kingdisplay,kd035g6-54nt.yaml b/Documentation/devicetree/bindings/display/panel/kingdisplay,kd035g6-54nt.yaml
index d86c916f7b55..fe7ad266e1b0 100644
--- a/Documentation/devicetree/bindings/display/panel/kingdisplay,kd035g6-54nt.yaml
+++ b/Documentation/devicetree/bindings/display/panel/kingdisplay,kd035g6-54nt.yaml
@@ -20,11 +20,6 @@ properties:
reg:
maxItems: 1
- backlight: true
- port: true
- power-supply: true
- reset-gpios: true
-
spi-3wire: true
required:
diff --git a/Documentation/devicetree/bindings/display/panel/lgphilips,lb035q02.yaml b/Documentation/devicetree/bindings/display/panel/lgphilips,lb035q02.yaml
index 3de17fd8513b..3c8c65c6a869 100644
--- a/Documentation/devicetree/bindings/display/panel/lgphilips,lb035q02.yaml
+++ b/Documentation/devicetree/bindings/display/panel/lgphilips,lb035q02.yaml
@@ -20,10 +20,6 @@ properties:
reg:
maxItems: 1
- label: true
- enable-gpios: true
- port: true
-
spi-cpha: true
spi-cpol: true
diff --git a/Documentation/devicetree/bindings/display/panel/lxd,m9189a.yaml b/Documentation/devicetree/bindings/display/panel/lxd,m9189a.yaml
new file mode 100644
index 000000000000..226974a4077f
--- /dev/null
+++ b/Documentation/devicetree/bindings/display/panel/lxd,m9189a.yaml
@@ -0,0 +1,64 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/display/panel/lxd,m9189a.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: LXD M9189A DSI Display Panel
+
+maintainers:
+ - Michael Tretter <m.tretter@pengutronix.de>
+
+allOf:
+ - $ref: panel-common.yaml
+
+properties:
+ compatible:
+ const: lxd,m9189a
+
+ reg:
+ maxItems: 1
+
+ standby-gpios:
+ description: GPIO used for the standby pin
+ maxItems: 1
+
+ reset-gpios: true
+ power-supply: true
+ backlight: true
+ port: true
+
+required:
+ - compatible
+ - reg
+ - standby-gpios
+ - reset-gpios
+ - power-supply
+ - backlight
+ - port
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/gpio/gpio.h>
+
+ dsi {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ panel@0 {
+ compatible = "lxd,m9189a";
+ reg = <0>;
+ backlight = <&backlight>;
+ reset-gpios = <&gpio3 25 GPIO_ACTIVE_LOW>;
+ standby-gpios = <&gpio5 22 GPIO_ACTIVE_LOW>;
+ power-supply = <&reg_display_3v3>;
+
+ port {
+ mipi_panel_in: endpoint {
+ remote-endpoint = <&mipi_dsi_out>;
+ };
+ };
+ };
+ };
diff --git a/Documentation/devicetree/bindings/display/panel/mitsubishi,aa104xd12.yaml b/Documentation/devicetree/bindings/display/panel/mitsubishi,aa104xd12.yaml
index 96621b89ae9e..43e98bb07c38 100644
--- a/Documentation/devicetree/bindings/display/panel/mitsubishi,aa104xd12.yaml
+++ b/Documentation/devicetree/bindings/display/panel/mitsubishi,aa104xd12.yaml
@@ -47,8 +47,6 @@ properties:
panel-timing: true
port: true
-additionalProperties: false
-
required:
- compatible
- data-mapping
@@ -57,6 +55,8 @@ required:
- panel-timing
- port
+additionalProperties: false
+
examples:
- |+
diff --git a/Documentation/devicetree/bindings/display/panel/mitsubishi,aa121td01.yaml b/Documentation/devicetree/bindings/display/panel/mitsubishi,aa121td01.yaml
index 37f01d847aac..2af993d73619 100644
--- a/Documentation/devicetree/bindings/display/panel/mitsubishi,aa121td01.yaml
+++ b/Documentation/devicetree/bindings/display/panel/mitsubishi,aa121td01.yaml
@@ -44,8 +44,6 @@ properties:
panel-timing: true
port: true
-additionalProperties: false
-
required:
- compatible
- vcc-supply
@@ -55,6 +53,8 @@ required:
- panel-timing
- port
+additionalProperties: false
+
examples:
- |+
panel {
diff --git a/Documentation/devicetree/bindings/display/panel/motorola,mot-panel.yaml b/Documentation/devicetree/bindings/display/panel/motorola,mot-panel.yaml
new file mode 100644
index 000000000000..99fa1b3ed426
--- /dev/null
+++ b/Documentation/devicetree/bindings/display/panel/motorola,mot-panel.yaml
@@ -0,0 +1,69 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/display/panel/motorola,mot-panel.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Atrix 4G and Droid X2 DSI Display Panel
+
+maintainers:
+ - Svyatoslav Ryhel <clamor95@gmail.com>
+
+description:
+ Atrix 4G and Droid X2 use the same 540x960 DSI video mode panel. Exact
+ panel vendor and model are unknown hence generic compatible based on the
+ board name "Mot" is used.
+
+allOf:
+ - $ref: panel-common.yaml#
+
+properties:
+ compatible:
+ items:
+ - const: motorola,mot-panel
+
+ reg:
+ maxItems: 1
+
+ vdd-supply:
+ description: Regulator for main power supply.
+
+ vddio-supply:
+ description: Regulator for 1.8V IO power supply.
+
+ backlight: true
+ reset-gpios: true
+ port: true
+
+required:
+ - compatible
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/gpio/gpio.h>
+
+ dsi {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ panel@0 {
+ compatible = "motorola,mot-panel";
+ reg = <0>;
+
+ reset-gpios = <&gpio 35 GPIO_ACTIVE_LOW>;
+
+ vdd-supply = <&vdd_5v0_panel>;
+ vddio-supply = <&vdd_1v8_vio>;
+
+ backlight = <&backlight>;
+
+ port {
+ panel_in: endpoint {
+ remote-endpoint = <&dsi_out>;
+ };
+ };
+ };
+ };
+...
diff --git a/Documentation/devicetree/bindings/display/panel/nec,nl8048hl11.yaml b/Documentation/devicetree/bindings/display/panel/nec,nl8048hl11.yaml
index 1cffe4d6d498..eb9eeba92359 100644
--- a/Documentation/devicetree/bindings/display/panel/nec,nl8048hl11.yaml
+++ b/Documentation/devicetree/bindings/display/panel/nec,nl8048hl11.yaml
@@ -24,10 +24,6 @@ properties:
reg:
maxItems: 1
- label: true
- port: true
- reset-gpios: true
-
spi-max-frequency:
maximum: 10000000
diff --git a/Documentation/devicetree/bindings/display/panel/novatek,nt36523.yaml b/Documentation/devicetree/bindings/display/panel/novatek,nt36523.yaml
index c4bae4f77085..b9300a1f2646 100644
--- a/Documentation/devicetree/bindings/display/panel/novatek,nt36523.yaml
+++ b/Documentation/devicetree/bindings/display/panel/novatek,nt36523.yaml
@@ -37,9 +37,6 @@ properties:
vddio-supply:
description: regulator that supplies the I/O voltage
- rotation: true
- backlight: true
-
required:
- compatible
- reg
diff --git a/Documentation/devicetree/bindings/display/panel/novatek,nt36672a.yaml b/Documentation/devicetree/bindings/display/panel/novatek,nt36672a.yaml
index 800a2f0a4dad..5d16d8511725 100644
--- a/Documentation/devicetree/bindings/display/panel/novatek,nt36672a.yaml
+++ b/Documentation/devicetree/bindings/display/panel/novatek,nt36672a.yaml
@@ -47,9 +47,6 @@ properties:
vddneg-supply:
description: phandle of the negative boost supply regulator
- port: true
- backlight: true
-
required:
- compatible
- reg
diff --git a/Documentation/devicetree/bindings/display/panel/orisetech,otm8009a.yaml b/Documentation/devicetree/bindings/display/panel/orisetech,otm8009a.yaml
index 1e4f140f48b8..1f697dab832b 100644
--- a/Documentation/devicetree/bindings/display/panel/orisetech,otm8009a.yaml
+++ b/Documentation/devicetree/bindings/display/panel/orisetech,otm8009a.yaml
@@ -31,12 +31,12 @@ properties:
reset-gpios:
maxItems: 1
-additionalProperties: false
-
required:
- compatible
- reg
+additionalProperties: false
+
examples:
- |
dsi {
diff --git a/Documentation/devicetree/bindings/display/panel/panel-simple-dsi.yaml b/Documentation/devicetree/bindings/display/panel/panel-simple-dsi.yaml
index 2f90c887b7b8..cc8d795df732 100644
--- a/Documentation/devicetree/bindings/display/panel/panel-simple-dsi.yaml
+++ b/Documentation/devicetree/bindings/display/panel/panel-simple-dsi.yaml
@@ -49,6 +49,8 @@ properties:
- lg,lh500wx1-sd03
# Lincoln LCD197 5" 1080x1920 LCD panel
- lincolntech,lcd197
+ # Novatek NT37700F 1080x2160 AMOLED panel
+ - novatek,nt37700f
# One Stop Displays OSD101T2587-53TS 10.1" 1920x1200 panel
- osddisplays,osd101t2587-53ts
# Panasonic 10" WUXGA TFT LCD panel
diff --git a/Documentation/devicetree/bindings/display/panel/panel-simple.yaml b/Documentation/devicetree/bindings/display/panel/panel-simple.yaml
index 106ae91ff474..e98ca672ba49 100644
--- a/Documentation/devicetree/bindings/display/panel/panel-simple.yaml
+++ b/Documentation/devicetree/bindings/display/panel/panel-simple.yaml
@@ -103,6 +103,8 @@ properties:
- dlc,dlc1010gig
# Emerging Display Technology Corp. 3.5" QVGA TFT LCD panel
- edt,et035012dm6
+ # Emerging Display Technology Corp. 5.7" 24-bit VGA TFT LCD panel
+ - edt,et057023udba
# Emerging Display Technology Corp. 5.7" VGA TFT LCD panel
- edt,et057090dhu
- edt,et070080dh6
@@ -266,6 +268,8 @@ properties:
- powertip,ph128800t006-zhc01
# POWERTIP PH800480T013-IDF2 7.0" WVGA TFT LCD panel
- powertip,ph800480t013-idf02
+ # POWERTIP PH800480T032-ZHC19 7.0" WVGA TFT LCD panel
+ - powertip,ph800480t032-zhc19
# PrimeView PM070WL4 7.0" 800x480 TFT LCD panel
- primeview,pm070wl4
# QiaoDian XianShi Corporation 4"3 TFT LCD panel
diff --git a/Documentation/devicetree/bindings/display/panel/pda,91-00156-a0.yaml b/Documentation/devicetree/bindings/display/panel/pda,91-00156-a0.yaml
index ccd3623b4955..871e4c2d9824 100644
--- a/Documentation/devicetree/bindings/display/panel/pda,91-00156-a0.yaml
+++ b/Documentation/devicetree/bindings/display/panel/pda,91-00156-a0.yaml
@@ -21,11 +21,11 @@ properties:
backlight: true
port: true
-additionalProperties: false
-
required:
- compatible
- power-supply
- backlight
+additionalProperties: false
+
...
diff --git a/Documentation/devicetree/bindings/display/panel/raydium,rm68200.yaml b/Documentation/devicetree/bindings/display/panel/raydium,rm68200.yaml
index 46fe1014ebc4..8fb7c013dfb8 100644
--- a/Documentation/devicetree/bindings/display/panel/raydium,rm68200.yaml
+++ b/Documentation/devicetree/bindings/display/panel/raydium,rm68200.yaml
@@ -33,13 +33,13 @@ properties:
reset-gpios:
maxItems: 1
-additionalProperties: false
-
required:
- compatible
- power-supply
- reg
+additionalProperties: false
+
examples:
- |
dsi {
diff --git a/Documentation/devicetree/bindings/display/panel/raydium,rm692e5.yaml b/Documentation/devicetree/bindings/display/panel/raydium,rm692e5.yaml
index 7ad223f98253..616a5f3ec9fc 100644
--- a/Documentation/devicetree/bindings/display/panel/raydium,rm692e5.yaml
+++ b/Documentation/devicetree/bindings/display/panel/raydium,rm692e5.yaml
@@ -34,8 +34,6 @@ properties:
vddio-supply:
description: I/O voltage rail
- port: true
-
required:
- compatible
- reg
diff --git a/Documentation/devicetree/bindings/display/panel/samsung,atna33xc20.yaml b/Documentation/devicetree/bindings/display/panel/samsung,atna33xc20.yaml
index f1723e910252..1bbe0da3997c 100644
--- a/Documentation/devicetree/bindings/display/panel/samsung,atna33xc20.yaml
+++ b/Documentation/devicetree/bindings/display/panel/samsung,atna33xc20.yaml
@@ -43,13 +43,13 @@ properties:
no-hpd: true
hpd-gpios: true
-additionalProperties: false
-
required:
- compatible
- enable-gpios
- power-supply
+additionalProperties: false
+
examples:
- |
#include <dt-bindings/clock/qcom,rpmh.h>
diff --git a/Documentation/devicetree/bindings/display/panel/samsung,ld9040.yaml b/Documentation/devicetree/bindings/display/panel/samsung,ld9040.yaml
index bc92b16c95b9..2e64fba472cc 100644
--- a/Documentation/devicetree/bindings/display/panel/samsung,ld9040.yaml
+++ b/Documentation/devicetree/bindings/display/panel/samsung,ld9040.yaml
@@ -20,10 +20,6 @@ properties:
reg:
maxItems: 1
- display-timings: true
- port: true
- reset-gpios: true
-
vdd3-supply:
description: core voltage supply
diff --git a/Documentation/devicetree/bindings/display/panel/samsung,lms380kf01.yaml b/Documentation/devicetree/bindings/display/panel/samsung,lms380kf01.yaml
index 74c2a617c2ff..828b7d7ba17f 100644
--- a/Documentation/devicetree/bindings/display/panel/samsung,lms380kf01.yaml
+++ b/Documentation/devicetree/bindings/display/panel/samsung,lms380kf01.yaml
@@ -31,8 +31,6 @@ properties:
configuration.
maxItems: 1
- reset-gpios: true
-
vci-supply:
description: regulator that supplies the VCI analog voltage
usually around 3.0 V
@@ -41,8 +39,6 @@ properties:
description: regulator that supplies the VCCIO voltage usually
around 1.8 V
- backlight: true
-
spi-cpha: true
spi-cpol: true
@@ -50,8 +46,6 @@ properties:
spi-max-frequency:
maximum: 1200000
- port: true
-
required:
- compatible
- reg
diff --git a/Documentation/devicetree/bindings/display/panel/samsung,lms397kf04.yaml b/Documentation/devicetree/bindings/display/panel/samsung,lms397kf04.yaml
index 4cecf502a150..c04d47e59f24 100644
--- a/Documentation/devicetree/bindings/display/panel/samsung,lms397kf04.yaml
+++ b/Documentation/devicetree/bindings/display/panel/samsung,lms397kf04.yaml
@@ -23,8 +23,6 @@ properties:
reg:
maxItems: 1
- reset-gpios: true
-
vci-supply:
description: regulator that supplies the VCI analog voltage
usually around 3.0 V
@@ -33,8 +31,6 @@ properties:
description: regulator that supplies the VCCIO voltage usually
around 1.8 V
- backlight: true
-
spi-cpha: true
spi-cpol: true
@@ -44,8 +40,6 @@ properties:
maximum 300 ns minimum cycle which gives around 3 MHz max frequency
maximum: 3000000
- port: true
-
required:
- compatible
- reg
diff --git a/Documentation/devicetree/bindings/display/panel/samsung,s6d27a1.yaml b/Documentation/devicetree/bindings/display/panel/samsung,s6d27a1.yaml
index d74904164719..0d57f97e8a76 100644
--- a/Documentation/devicetree/bindings/display/panel/samsung,s6d27a1.yaml
+++ b/Documentation/devicetree/bindings/display/panel/samsung,s6d27a1.yaml
@@ -30,8 +30,6 @@ properties:
configuration.
maxItems: 1
- reset-gpios: true
-
vci-supply:
description: regulator that supplies the VCI analog voltage
usually around 3.0 V
@@ -40,8 +38,6 @@ properties:
description: regulator that supplies the VCCIO voltage usually
around 1.8 V
- backlight: true
-
spi-cpha: true
spi-cpol: true
@@ -49,8 +45,6 @@ properties:
spi-max-frequency:
maximum: 1200000
- port: true
-
required:
- compatible
- reg
diff --git a/Documentation/devicetree/bindings/display/panel/samsung,s6d7aa0.yaml b/Documentation/devicetree/bindings/display/panel/samsung,s6d7aa0.yaml
index 939da65114bf..1f753b706911 100644
--- a/Documentation/devicetree/bindings/display/panel/samsung,s6d7aa0.yaml
+++ b/Documentation/devicetree/bindings/display/panel/samsung,s6d7aa0.yaml
@@ -44,6 +44,8 @@ properties:
vmipi-supply:
description: VMIPI supply, usually 1.8v.
+ port: true
+
required:
- compatible
- reg
@@ -65,6 +67,12 @@ examples:
power-supply = <&display_3v3_supply>;
reset-gpios = <&gpf0 4 GPIO_ACTIVE_LOW>;
backlight = <&backlight>;
+
+ port {
+ panel_in: endpoint {
+ remote-endpoint = <&mdss_dsi0_out>;
+ };
+ };
};
};
diff --git a/Documentation/devicetree/bindings/display/panel/samsung,s6e3ha8.yaml b/Documentation/devicetree/bindings/display/panel/samsung,s6e3ha8.yaml
index 05a78429aaea..00ce5a4e1c6b 100644
--- a/Documentation/devicetree/bindings/display/panel/samsung,s6e3ha8.yaml
+++ b/Documentation/devicetree/bindings/display/panel/samsung,s6e3ha8.yaml
@@ -22,10 +22,6 @@ properties:
reg:
maxItems: 1
- reset-gpios: true
-
- port: true
-
vdd3-supply:
description: VDD regulator
diff --git a/Documentation/devicetree/bindings/display/panel/samsung,s6e63m0.yaml b/Documentation/devicetree/bindings/display/panel/samsung,s6e63m0.yaml
index c47e2a1a30e5..b65f0688bdf0 100644
--- a/Documentation/devicetree/bindings/display/panel/samsung,s6e63m0.yaml
+++ b/Documentation/devicetree/bindings/display/panel/samsung,s6e63m0.yaml
@@ -21,8 +21,6 @@ properties:
reg:
maxItems: 1
- reset-gpios: true
- port: true
default-brightness: true
max-brightness: true
diff --git a/Documentation/devicetree/bindings/display/panel/sgd,gktw70sdae4se.yaml b/Documentation/devicetree/bindings/display/panel/sgd,gktw70sdae4se.yaml
index e32d9188a3e0..1beb4ba92248 100644
--- a/Documentation/devicetree/bindings/display/panel/sgd,gktw70sdae4se.yaml
+++ b/Documentation/devicetree/bindings/display/panel/sgd,gktw70sdae4se.yaml
@@ -41,8 +41,6 @@ properties:
panel-timing: true
port: true
-additionalProperties: false
-
required:
- compatible
- port
@@ -51,6 +49,8 @@ required:
- height-mm
- panel-timing
+additionalProperties: false
+
examples:
- |+
panel {
diff --git a/Documentation/devicetree/bindings/display/panel/sitronix,st7701.yaml b/Documentation/devicetree/bindings/display/panel/sitronix,st7701.yaml
index 1e434240ea3f..044b84d8638d 100644
--- a/Documentation/devicetree/bindings/display/panel/sitronix,st7701.yaml
+++ b/Documentation/devicetree/bindings/display/panel/sitronix,st7701.yaml
@@ -49,12 +49,6 @@ properties:
If not set, the controller is in 3-line SPI mode.
Disallowed for DSI.
- port: true
- reset-gpios: true
- rotation: true
-
- backlight: true
-
required:
- compatible
- reg
diff --git a/Documentation/devicetree/bindings/display/panel/sitronix,st7789v.yaml b/Documentation/devicetree/bindings/display/panel/sitronix,st7789v.yaml
index c35d4f2ab9a4..e4fa05163d2d 100644
--- a/Documentation/devicetree/bindings/display/panel/sitronix,st7789v.yaml
+++ b/Documentation/devicetree/bindings/display/panel/sitronix,st7789v.yaml
@@ -24,12 +24,6 @@ properties:
reg:
maxItems: 1
- reset-gpios: true
- power-supply: true
- backlight: true
- port: true
- rotation: true
-
spi-cpha: true
spi-cpol: true
diff --git a/Documentation/devicetree/bindings/display/panel/sony,acx565akm.yaml b/Documentation/devicetree/bindings/display/panel/sony,acx565akm.yaml
index 5a8260224b74..12e5ad504001 100644
--- a/Documentation/devicetree/bindings/display/panel/sony,acx565akm.yaml
+++ b/Documentation/devicetree/bindings/display/panel/sony,acx565akm.yaml
@@ -20,10 +20,6 @@ properties:
reg:
maxItems: 1
- label: true
- reset-gpios: true
- port: true
-
required:
- compatible
- port
diff --git a/Documentation/devicetree/bindings/display/panel/startek,kd070fhfid015.yaml b/Documentation/devicetree/bindings/display/panel/startek,kd070fhfid015.yaml
index d817f998cddc..0819f38a9d2c 100644
--- a/Documentation/devicetree/bindings/display/panel/startek,kd070fhfid015.yaml
+++ b/Documentation/devicetree/bindings/display/panel/startek,kd070fhfid015.yaml
@@ -31,8 +31,6 @@ properties:
power-supply: true
-additionalProperties: false
-
required:
- compatible
- enable-gpios
@@ -42,6 +40,8 @@ required:
- port
- power-supply
+additionalProperties: false
+
examples:
- |
#include <dt-bindings/gpio/gpio.h>
diff --git a/Documentation/devicetree/bindings/display/panel/tpo,td.yaml b/Documentation/devicetree/bindings/display/panel/tpo,td.yaml
index 7edd29df4bbb..855911588d73 100644
--- a/Documentation/devicetree/bindings/display/panel/tpo,td.yaml
+++ b/Documentation/devicetree/bindings/display/panel/tpo,td.yaml
@@ -25,11 +25,6 @@ properties:
reg:
maxItems: 1
- label: true
- reset-gpios: true
- backlight: true
- port: true
-
spi-cpha: true
spi-cpol: true
diff --git a/Documentation/devicetree/bindings/display/panel/visionox,r66451.yaml b/Documentation/devicetree/bindings/display/panel/visionox,r66451.yaml
index 187840bb76c7..49ef45c03593 100644
--- a/Documentation/devicetree/bindings/display/panel/visionox,r66451.yaml
+++ b/Documentation/devicetree/bindings/display/panel/visionox,r66451.yaml
@@ -25,8 +25,6 @@ properties:
port: true
reset-gpios: true
-additionalProperties: false
-
required:
- compatible
- reg
@@ -35,6 +33,8 @@ required:
- reset-gpios
- port
+additionalProperties: false
+
examples:
- |
#include <dt-bindings/gpio/gpio.h>
diff --git a/Documentation/devicetree/bindings/display/panel/visionox,rm69299.yaml b/Documentation/devicetree/bindings/display/panel/visionox,rm69299.yaml
index f0a82f0ff790..f61a528c0413 100644
--- a/Documentation/devicetree/bindings/display/panel/visionox,rm69299.yaml
+++ b/Documentation/devicetree/bindings/display/panel/visionox,rm69299.yaml
@@ -36,8 +36,6 @@ properties:
port: true
reset-gpios: true
-additionalProperties: false
-
required:
- compatible
- reg
@@ -46,6 +44,8 @@ required:
- reset-gpios
- port
+additionalProperties: false
+
examples:
- |
dsi {
diff --git a/Documentation/devicetree/bindings/display/panel/visionox,vtdr6130.yaml b/Documentation/devicetree/bindings/display/panel/visionox,vtdr6130.yaml
index d5a8295106c1..c99f4146f1bb 100644
--- a/Documentation/devicetree/bindings/display/panel/visionox,vtdr6130.yaml
+++ b/Documentation/devicetree/bindings/display/panel/visionox,vtdr6130.yaml
@@ -26,8 +26,6 @@ properties:
port: true
reset-gpios: true
-additionalProperties: false
-
required:
- compatible
- reg
@@ -37,6 +35,8 @@ required:
- reset-gpios
- port
+additionalProperties: false
+
examples:
- |
#include <dt-bindings/gpio/gpio.h>
diff --git a/Documentation/devicetree/bindings/vendor-prefixes.yaml b/Documentation/devicetree/bindings/vendor-prefixes.yaml
index 1ef679f88203..3ec0951c09c1 100644
--- a/Documentation/devicetree/bindings/vendor-prefixes.yaml
+++ b/Documentation/devicetree/bindings/vendor-prefixes.yaml
@@ -973,6 +973,8 @@ patternProperties:
description: Liebherr-Werk Nenzing GmbH
"^lxa,.*":
description: Linux Automation GmbH
+ "^lxd,.*":
+ description: LXD Research & Display, LLC
"^m5stack,.*":
description: M5Stack
"^macnica,.*":
@@ -1610,6 +1612,8 @@ patternProperties:
"^synopsys,.*":
description: Synopsys, Inc. (deprecated, use snps)
deprecated: true
+ "^taiguanck,.*":
+ description: Shenzhen Top Group Technology Co., Ltd.
"^taos,.*":
description: Texas Advanced Optoelectronic Solutions Inc.
"^tbs,.*":
diff --git a/MAINTAINERS b/MAINTAINERS
index afff2d0e7761..00b05e5328be 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -8059,6 +8059,12 @@ F: Documentation/devicetree/bindings/display/lvds.yaml
F: Documentation/devicetree/bindings/display/panel/panel-lvds.yaml
F: drivers/gpu/drm/panel/panel-lvds.c
+DRM DRIVER FOR LXD M9189A PANELS
+M: Michael Tretter <m.tretter@pengutronix.de>
+S: Maintained
+F: Documentation/devicetree/bindings/display/panel/lxd,m9189a.yaml
+F: drivers/gpu/drm/panel/panel-lxd-m9189a.c
+
DRM DRIVER FOR MANTIX MLAF057WE51 PANELS
M: Guido Günther <agx@sigxcpu.org>
R: Purism Kernel Team <kernel@puri.sm>
@@ -8916,11 +8922,11 @@ R: Christian Koenig <christian.koenig@amd.com>
L: dri-devel@lists.freedesktop.org
S: Maintained
T: git https://gitlab.freedesktop.org/drm/misc/kernel.git
-F: drivers/gpu/drm_buddy.c
F: drivers/gpu/buddy.c
+F: drivers/gpu/drm/drm_buddy.c
F: drivers/gpu/tests/gpu_buddy_test.c
-F: include/linux/gpu_buddy.h
F: include/drm/drm_buddy.h
+F: include/linux/gpu_buddy.h
DRM AUTOMATED TESTING
M: Helen Koike <helen.fornazier@gmail.com>
@@ -12985,7 +12991,6 @@ F: drivers/crypto/intel/ixp4xx/ixp4xx_crypto.c
INTEL KEEM BAY DRM DRIVER
M: Anitha Chrisanthus <anitha.chrisanthus@intel.com>
-M: Edmund Dea <edmund.j.dea@intel.com>
S: Maintained
F: Documentation/devicetree/bindings/display/intel,keembay-display.yaml
F: drivers/gpu/drm/kmb/
diff --git a/drivers/accel/amdxdna/aie2_pci.c b/drivers/accel/amdxdna/aie2_pci.c
index 4924a9da55b6..ddd3d82f3426 100644
--- a/drivers/accel/amdxdna/aie2_pci.c
+++ b/drivers/accel/amdxdna/aie2_pci.c
@@ -10,6 +10,7 @@
#include <drm/drm_managed.h>
#include <drm/drm_print.h>
#include <drm/gpu_scheduler.h>
+#include <linux/amd-pmf-io.h>
#include <linux/cleanup.h>
#include <linux/errno.h>
#include <linux/firmware.h>
@@ -791,6 +792,55 @@ static int aie2_get_clock_metadata(struct amdxdna_client *client,
return ret;
}
+static int aie2_get_sensors(struct amdxdna_client *client,
+ struct amdxdna_drm_get_info *args)
+{
+ struct amdxdna_dev_hdl *ndev = client->xdna->dev_handle;
+ struct amdxdna_drm_query_sensor sensor = {};
+ struct amd_pmf_npu_metrics npu_metrics;
+ u32 sensors_count = 0, i;
+ int ret;
+
+ ret = AIE2_GET_PMF_NPU_METRICS(&npu_metrics);
+ if (ret)
+ return ret;
+
+ sensor.type = AMDXDNA_SENSOR_TYPE_POWER;
+ sensor.input = npu_metrics.npu_power;
+ sensor.unitm = -3;
+ scnprintf(sensor.label, sizeof(sensor.label), "Total Power");
+ scnprintf(sensor.units, sizeof(sensor.units), "mW");
+
+ if (copy_to_user(u64_to_user_ptr(args->buffer), &sensor, sizeof(sensor)))
+ return -EFAULT;
+
+ sensors_count++;
+ if (args->buffer_size <= sensors_count * sizeof(sensor))
+ goto out;
+
+ for (i = 0; i < min_t(u32, ndev->total_col, 8); i++) {
+ memset(&sensor, 0, sizeof(sensor));
+ sensor.input = npu_metrics.npu_busy[i];
+ sensor.type = AMDXDNA_SENSOR_TYPE_COLUMN_UTILIZATION;
+ sensor.unitm = 0;
+ scnprintf(sensor.label, sizeof(sensor.label), "Column %d Utilization", i);
+ scnprintf(sensor.units, sizeof(sensor.units), "%%");
+
+ if (copy_to_user(u64_to_user_ptr(args->buffer) + sensors_count * sizeof(sensor),
+ &sensor, sizeof(sensor)))
+ return -EFAULT;
+
+ sensors_count++;
+ if (args->buffer_size <= sensors_count * sizeof(sensor))
+ goto out;
+ }
+
+out:
+ args->buffer_size = sensors_count * sizeof(sensor);
+
+ return 0;
+}
+
static int aie2_hwctx_status_cb(struct amdxdna_hwctx *hwctx, void *arg)
{
struct amdxdna_drm_hwctx_entry *tmp __free(kfree) = NULL;
@@ -994,6 +1044,9 @@ static int aie2_get_info(struct amdxdna_client *client, struct amdxdna_drm_get_i
case DRM_AMDXDNA_QUERY_CLOCK_METADATA:
ret = aie2_get_clock_metadata(client, args);
break;
+ case DRM_AMDXDNA_QUERY_SENSORS:
+ ret = aie2_get_sensors(client, args);
+ break;
case DRM_AMDXDNA_QUERY_HW_CONTEXTS:
ret = aie2_get_hwctx_status(client, args);
break;
diff --git a/drivers/accel/amdxdna/aie2_pci.h b/drivers/accel/amdxdna/aie2_pci.h
index e72311c77996..c0959e6937ba 100644
--- a/drivers/accel/amdxdna/aie2_pci.h
+++ b/drivers/accel/amdxdna/aie2_pci.h
@@ -7,6 +7,7 @@
#define _AIE2_PCI_H_
#include <drm/amdxdna_accel.h>
+#include <linux/limits.h>
#include <linux/semaphore.h>
#include "amdxdna_mailbox.h"
@@ -46,6 +47,33 @@
pci_resource_len(NDEV2PDEV(_ndev), (_ndev)->xdna->dev_info->mbox_bar); \
})
+#if IS_ENABLED(CONFIG_AMD_PMF)
+#define AIE2_GET_PMF_NPU_METRICS(metrics) amd_pmf_get_npu_data(metrics)
+#define AIE2_GET_PMF_NPU_DATA(field, val) \
+({ \
+ struct amd_pmf_npu_metrics _npu_metrics; \
+ int _ret; \
+ \
+ _ret = amd_pmf_get_npu_data(&_npu_metrics); \
+ val = _ret ? U32_MAX : _npu_metrics.field; \
+ (_ret); \
+})
+#else
+#define AIE2_GET_PMF_NPU_METRICS(metrics) \
+({ \
+ typeof(metrics) _m = metrics; \
+ memset(_m, 0xff, sizeof(*_m)); \
+ (-EOPNOTSUPP); \
+})
+
+#define SENSOR_DEFAULT_npu_power U32_MAX
+#define AIE2_GET_PMF_NPU_DATA(field, val) \
+({ \
+ val = SENSOR_DEFAULT_##field; \
+ (-EOPNOTSUPP); \
+})
+#endif
+
enum aie2_smu_reg_idx {
SMU_CMD_REG = 0,
SMU_ARG_REG,
diff --git a/drivers/accel/amdxdna/amdxdna_pci_drv.c b/drivers/accel/amdxdna/amdxdna_pci_drv.c
index a4384593bdcc..d6d12355bd2b 100644
--- a/drivers/accel/amdxdna/amdxdna_pci_drv.c
+++ b/drivers/accel/amdxdna/amdxdna_pci_drv.c
@@ -35,9 +35,10 @@ MODULE_FIRMWARE("amdnpu/17f0_11/npu_7.sbin");
* 0.4: Support getting resource information
* 0.5: Support getting telemetry data
* 0.6: Support preemption
+ * 0.7: Support getting power and utilization data
*/
#define AMDXDNA_DRIVER_MAJOR 0
-#define AMDXDNA_DRIVER_MINOR 6
+#define AMDXDNA_DRIVER_MINOR 7
/*
* Bind the driver base on (vendor_id, device_id) pair and later use the
@@ -358,5 +359,6 @@ static struct pci_driver amdxdna_pci_driver = {
module_pci_driver(amdxdna_pci_driver);
MODULE_LICENSE("GPL");
+MODULE_IMPORT_NS("AMD_PMF");
MODULE_AUTHOR("XRT Team <runtimeca39d@amd.com>");
MODULE_DESCRIPTION("amdxdna driver");
diff --git a/drivers/accel/ivpu/ivpu_gem.c b/drivers/accel/ivpu/ivpu_gem.c
index 98b9ce26962b..4f2005a8d496 100644
--- a/drivers/accel/ivpu/ivpu_gem.c
+++ b/drivers/accel/ivpu/ivpu_gem.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
- * Copyright (C) 2020-2023 Intel Corporation
+ * Copyright (C) 2020-2026 Intel Corporation
*/
#include <linux/dma-buf.h>
@@ -31,7 +31,7 @@ static inline void ivpu_dbg_bo(struct ivpu_device *vdev, struct ivpu_bo *bo, con
"%6s: bo %8p size %9zu ctx %d vpu_addr %9llx pages %d sgt %d mmu_mapped %d wc %d imported %d\n",
action, bo, ivpu_bo_size(bo), bo->ctx_id, bo->vpu_addr,
(bool)bo->base.pages, (bool)bo->base.sgt, bo->mmu_mapped, bo->base.map_wc,
- (bool)drm_gem_is_imported(&bo->base.base));
+ drm_gem_is_imported(&bo->base.base));
}
static inline int ivpu_bo_lock(struct ivpu_bo *bo)
@@ -48,7 +48,7 @@ static struct sg_table *ivpu_bo_map_attachment(struct ivpu_device *vdev, struct
{
struct sg_table *sgt;
- drm_WARN_ON(&vdev->drm, !bo->base.base.import_attach);
+ drm_WARN_ON(&vdev->drm, !drm_gem_is_imported(&bo->base.base));
ivpu_bo_lock(bo);
@@ -81,7 +81,7 @@ int __must_check ivpu_bo_bind(struct ivpu_bo *bo)
ivpu_dbg_bo(vdev, bo, "bind");
- if (bo->base.base.import_attach)
+ if (drm_gem_is_imported(&bo->base.base))
sgt = ivpu_bo_map_attachment(vdev, bo);
else
sgt = drm_gem_shmem_get_pages_sgt(&bo->base);
@@ -157,7 +157,7 @@ static void ivpu_bo_unbind_locked(struct ivpu_bo *bo)
}
if (bo->base.sgt) {
- if (bo->base.base.import_attach) {
+ if (drm_gem_is_imported(&bo->base.base)) {
dma_buf_unmap_attachment(bo->base.base.import_attach,
bo->base.sgt, DMA_BIDIRECTIONAL);
} else {
@@ -195,7 +195,7 @@ struct drm_gem_object *ivpu_gem_create_object(struct drm_device *dev, size_t siz
if (size == 0 || !PAGE_ALIGNED(size))
return ERR_PTR(-EINVAL);
- bo = kzalloc(sizeof(*bo), GFP_KERNEL);
+ bo = kzalloc_obj(*bo);
if (!bo)
return ERR_PTR(-ENOMEM);
diff --git a/drivers/accel/ivpu/ivpu_hw_40xx_reg.h b/drivers/accel/ivpu/ivpu_hw_40xx_reg.h
index 421242acb184..fc0ee8d637f9 100644
--- a/drivers/accel/ivpu/ivpu_hw_40xx_reg.h
+++ b/drivers/accel/ivpu/ivpu_hw_40xx_reg.h
@@ -121,12 +121,6 @@
#define VPU_50XX_HOST_SS_AON_PWR_ISLAND_STATUS_DLY 0x0003006cu
#define VPU_50XX_HOST_SS_AON_PWR_ISLAND_STATUS_DLY_STATUS_DLY_MASK GENMASK(7, 0)
-#define VPU_40XX_HOST_SS_AON_RETENTION0 0x0003000cu
-#define VPU_40XX_HOST_SS_AON_RETENTION1 0x00030010u
-#define VPU_40XX_HOST_SS_AON_RETENTION2 0x00030014u
-#define VPU_40XX_HOST_SS_AON_RETENTION3 0x00030018u
-#define VPU_40XX_HOST_SS_AON_RETENTION4 0x0003001cu
-
#define VPU_40XX_HOST_SS_AON_IDLE_GEN 0x00030200u
#define VPU_40XX_HOST_SS_AON_IDLE_GEN_EN_MASK BIT_MASK(0)
#define VPU_40XX_HOST_SS_AON_IDLE_GEN_HW_PG_EN_MASK BIT_MASK(1)
diff --git a/drivers/accel/ivpu/ivpu_hw_ip.c b/drivers/accel/ivpu/ivpu_hw_ip.c
index 959984c54341..37f95a0551ed 100644
--- a/drivers/accel/ivpu/ivpu_hw_ip.c
+++ b/drivers/accel/ivpu/ivpu_hw_ip.c
@@ -931,7 +931,6 @@ static int soc_cpu_boot_40xx(struct ivpu_device *vdev)
static int soc_cpu_boot_60xx(struct ivpu_device *vdev)
{
- REGV_WR64(VPU_40XX_HOST_SS_AON_RETENTION1, vdev->fw->mem_bp->vpu_addr);
soc_cpu_set_entry_point_40xx(vdev, vdev->fw->cold_boot_entry_point);
return 0;
diff --git a/drivers/dma-buf/heaps/cma_heap.c b/drivers/dma-buf/heaps/cma_heap.c
index bd3370b9a3f6..92865786cfc9 100644
--- a/drivers/dma-buf/heaps/cma_heap.c
+++ b/drivers/dma-buf/heaps/cma_heap.c
@@ -329,10 +329,7 @@ static struct dma_buf *cma_heap_allocate(struct dma_heap *heap,
struct page *page = cma_pages;
while (nr_clear_pages > 0) {
- void *vaddr = kmap_local_page(page);
-
- clear_page(vaddr);
- kunmap_local(vaddr);
+ clear_highpage(page);
/*
* Avoid wasting time zeroing memory if the process
* has been killed by SIGKILL.
@@ -343,7 +340,7 @@ static struct dma_buf *cma_heap_allocate(struct dma_heap *heap,
nr_clear_pages--;
}
} else {
- memset(page_address(cma_pages), 0, size);
+ clear_pages(page_address(cma_pages), pagecount);
}
buffer->pages = kmalloc_objs(*buffer->pages, pagecount);
diff --git a/drivers/gpu/buddy.c b/drivers/gpu/buddy.c
index da5a1222f46b..52686672e99f 100644
--- a/drivers/gpu/buddy.c
+++ b/drivers/gpu/buddy.c
@@ -53,6 +53,25 @@ gpu_buddy_block_is_split(struct gpu_buddy_block *block)
return gpu_buddy_block_state(block) == GPU_BUDDY_SPLIT;
}
+static unsigned int gpu_buddy_block_offset_alignment(struct gpu_buddy_block *block)
+{
+ u64 offset = gpu_buddy_block_offset(block);
+
+ if (!offset)
+ /*
+ * __ffs64(0) is undefined; offset 0 is maximally aligned, so return
+ * a value greater than any possible alignment.
+ */
+ return 64 + 1;
+
+ return __ffs64(offset);
+}
+
+RB_DECLARE_CALLBACKS_MAX(static, gpu_buddy_augment_cb,
+ struct gpu_buddy_block, rb,
+ unsigned int, subtree_max_alignment,
+ gpu_buddy_block_offset_alignment);
+
static struct gpu_buddy_block *gpu_block_alloc(struct gpu_buddy *mm,
struct gpu_buddy_block *parent,
unsigned int order,
@@ -106,26 +125,42 @@ static bool rbtree_is_empty(struct rb_root *root)
return RB_EMPTY_ROOT(root);
}
-static bool gpu_buddy_block_offset_less(const struct gpu_buddy_block *block,
- const struct gpu_buddy_block *node)
-{
- return gpu_buddy_block_offset(block) < gpu_buddy_block_offset(node);
-}
-
-static bool rbtree_block_offset_less(struct rb_node *block,
- const struct rb_node *node)
-{
- return gpu_buddy_block_offset_less(rbtree_get_free_block(block),
- rbtree_get_free_block(node));
-}
-
static void rbtree_insert(struct gpu_buddy *mm,
struct gpu_buddy_block *block,
enum gpu_buddy_free_tree tree)
{
- rb_add(&block->rb,
- &mm->free_trees[tree][gpu_buddy_block_order(block)],
- rbtree_block_offset_less);
+ struct rb_node **link, *parent = NULL;
+ unsigned int block_alignment, order;
+ struct gpu_buddy_block *node;
+ struct rb_root *root;
+
+ order = gpu_buddy_block_order(block);
+ block_alignment = gpu_buddy_block_offset_alignment(block);
+
+ root = &mm->free_trees[tree][order];
+ link = &root->rb_node;
+
+ while (*link) {
+ parent = *link;
+ node = rbtree_get_free_block(parent);
+ /*
+ * Manual augmentation update during insertion traversal. Required
+ * because rb_insert_augmented() only calls rotate callback during
+ * rotations. This ensures all ancestors on the insertion path have
+ * correct subtree_max_alignment values.
+ */
+ if (node->subtree_max_alignment < block_alignment)
+ node->subtree_max_alignment = block_alignment;
+
+ if (gpu_buddy_block_offset(block) < gpu_buddy_block_offset(node))
+ link = &parent->rb_left;
+ else
+ link = &parent->rb_right;
+ }
+
+ block->subtree_max_alignment = block_alignment;
+ rb_link_node(&block->rb, parent, link);
+ rb_insert_augmented(&block->rb, root, &gpu_buddy_augment_cb);
}
static void rbtree_remove(struct gpu_buddy *mm,
@@ -138,7 +173,7 @@ static void rbtree_remove(struct gpu_buddy *mm,
tree = get_block_tree(block);
root = &mm->free_trees[tree][order];
- rb_erase(&block->rb, root);
+ rb_erase_augmented(&block->rb, root, &gpu_buddy_augment_cb);
RB_CLEAR_NODE(&block->rb);
}
@@ -811,6 +846,127 @@ err_undo:
return ERR_PTR(err);
}
+static bool
+gpu_buddy_can_offset_align(u64 size, u64 min_block_size)
+{
+ return size < min_block_size && is_power_of_2(size);
+}
+
+static bool gpu_buddy_subtree_can_satisfy(struct rb_node *node,
+ unsigned int alignment)
+{
+ struct gpu_buddy_block *block;
+
+ block = rbtree_get_free_block(node);
+ return block->subtree_max_alignment >= alignment;
+}
+
+static struct gpu_buddy_block *
+gpu_buddy_find_block_aligned(struct gpu_buddy *mm,
+ enum gpu_buddy_free_tree tree,
+ unsigned int order,
+ unsigned int alignment,
+ unsigned long flags)
+{
+ struct rb_root *root = &mm->free_trees[tree][order];
+ struct rb_node *rb = root->rb_node;
+
+ while (rb) {
+ struct gpu_buddy_block *block = rbtree_get_free_block(rb);
+ struct rb_node *left_node = rb->rb_left, *right_node = rb->rb_right;
+
+ if (right_node) {
+ if (gpu_buddy_subtree_can_satisfy(right_node, alignment)) {
+ rb = right_node;
+ continue;
+ }
+ }
+
+ if (gpu_buddy_block_offset_alignment(block) >= alignment)
+ return block;
+
+ if (left_node) {
+ if (gpu_buddy_subtree_can_satisfy(left_node, alignment)) {
+ rb = left_node;
+ continue;
+ }
+ }
+
+ break;
+ }
+
+ return NULL;
+}
+
+static struct gpu_buddy_block *
+gpu_buddy_offset_aligned_allocation(struct gpu_buddy *mm,
+ u64 size,
+ u64 min_block_size,
+ unsigned long flags)
+{
+ struct gpu_buddy_block *block = NULL;
+ unsigned int order, tmp, alignment;
+ struct gpu_buddy_block *buddy;
+ enum gpu_buddy_free_tree tree;
+ unsigned long pages;
+ int err;
+
+ alignment = ilog2(min_block_size);
+ pages = size >> ilog2(mm->chunk_size);
+ order = fls(pages) - 1;
+
+ tree = (flags & GPU_BUDDY_CLEAR_ALLOCATION) ?
+ GPU_BUDDY_CLEAR_TREE : GPU_BUDDY_DIRTY_TREE;
+
+ for (tmp = order; tmp <= mm->max_order; ++tmp) {
+ block = gpu_buddy_find_block_aligned(mm, tree, tmp,
+ alignment, flags);
+ if (!block) {
+ tree = (tree == GPU_BUDDY_CLEAR_TREE) ?
+ GPU_BUDDY_DIRTY_TREE : GPU_BUDDY_CLEAR_TREE;
+ block = gpu_buddy_find_block_aligned(mm, tree, tmp,
+ alignment, flags);
+ }
+
+ if (block)
+ break;
+ }
+
+ if (!block)
+ return ERR_PTR(-ENOSPC);
+
+ while (gpu_buddy_block_order(block) > order) {
+ struct gpu_buddy_block *left, *right;
+
+ err = split_block(mm, block);
+ if (unlikely(err))
+ goto err_undo;
+
+ left = block->left;
+ right = block->right;
+
+ if (gpu_buddy_block_offset_alignment(right) >= alignment)
+ block = right;
+ else
+ block = left;
+ }
+
+ return block;
+
+err_undo:
+ /*
+ * We really don't want to leave around a bunch of split blocks, since
+ * bigger is better, so make sure we merge everything back before we
+ * free the allocated blocks.
+ */
+ buddy = __get_buddy(block);
+ if (buddy &&
+ (gpu_buddy_block_is_free(block) &&
+ gpu_buddy_block_is_free(buddy)))
+ __gpu_buddy_free(mm, block, false);
+ return ERR_PTR(err);
+}
+
static int __alloc_range(struct gpu_buddy *mm,
struct list_head *dfs,
u64 start, u64 size,
@@ -1080,6 +1236,7 @@ EXPORT_SYMBOL(gpu_buddy_block_trim);
static struct gpu_buddy_block *
__gpu_buddy_alloc_blocks(struct gpu_buddy *mm,
u64 start, u64 end,
+ u64 size, u64 min_block_size,
unsigned int order,
unsigned long flags)
{
@@ -1087,6 +1244,11 @@ __gpu_buddy_alloc_blocks(struct gpu_buddy *mm,
/* Allocate traversing within the range */
return __gpu_buddy_alloc_range_bias(mm, start, end,
order, flags);
+ else if (size < min_block_size)
+ /* Allocate from an offset-aligned region without size rounding */
+ return gpu_buddy_offset_aligned_allocation(mm, size,
+ min_block_size,
+ flags);
else
/* Allocate from freetree */
return alloc_from_freetree(mm, order, flags);
@@ -1158,8 +1320,11 @@ int gpu_buddy_alloc_blocks(struct gpu_buddy *mm,
if (flags & GPU_BUDDY_CONTIGUOUS_ALLOCATION) {
size = roundup_pow_of_two(size);
min_block_size = size;
- /* Align size value to min_block_size */
- } else if (!IS_ALIGNED(size, min_block_size)) {
+ /*
+ * Normalize the requested size to min_block_size for regular allocations.
+ * Offset-aligned allocations intentionally skip size rounding.
+ */
+ } else if (!gpu_buddy_can_offset_align(size, min_block_size)) {
size = round_up(size, min_block_size);
}
@@ -1179,43 +1344,60 @@ int gpu_buddy_alloc_blocks(struct gpu_buddy *mm,
do {
order = min(order, (unsigned int)fls(pages) - 1);
BUG_ON(order > mm->max_order);
- BUG_ON(order < min_order);
+ /*
+ * Regular allocations must not allocate blocks smaller than min_block_size.
+ * Offset-aligned allocations deliberately bypass this constraint.
+ */
+ BUG_ON(size >= min_block_size && order < min_order);
do {
+ unsigned int fallback_order;
+
block = __gpu_buddy_alloc_blocks(mm, start,
end,
+ size,
+ min_block_size,
order,
flags);
if (!IS_ERR(block))
break;
- if (order-- == min_order) {
- /* Try allocation through force merge method */
- if (mm->clear_avail &&
- !__force_merge(mm, start, end, min_order)) {
- block = __gpu_buddy_alloc_blocks(mm, start,
- end,
- min_order,
- flags);
- if (!IS_ERR(block)) {
- order = min_order;
- break;
- }
- }
+ if (size < min_block_size) {
+ fallback_order = order;
+ } else if (order == min_order) {
+ fallback_order = min_order;
+ } else {
+ order--;
+ continue;
+ }
- /*
- * Try contiguous block allocation through
- * try harder method.
- */
- if (flags & GPU_BUDDY_CONTIGUOUS_ALLOCATION &&
- !(flags & GPU_BUDDY_RANGE_ALLOCATION))
- return __alloc_contig_try_harder(mm,
- original_size,
- original_min_size,
- blocks);
- err = -ENOSPC;
- goto err_free;
+ /* Try allocation through force merge method */
+ if (mm->clear_avail &&
+ !__force_merge(mm, start, end, fallback_order)) {
+ block = __gpu_buddy_alloc_blocks(mm, start,
+ end,
+ size,
+ min_block_size,
+ fallback_order,
+ flags);
+ if (!IS_ERR(block)) {
+ order = fallback_order;
+ break;
+ }
}
+
+ /*
+ * Try contiguous block allocation through
+ * try harder method.
+ */
+ if (flags & GPU_BUDDY_CONTIGUOUS_ALLOCATION &&
+ !(flags & GPU_BUDDY_RANGE_ALLOCATION))
+ return __alloc_contig_try_harder(mm,
+ original_size,
+ original_min_size,
+ blocks);
+ err = -ENOSPC;
+ goto err_free;
} while (1);
mark_allocated(mm, block);
diff --git a/drivers/gpu/drm/amd/amdgpu/Makefile b/drivers/gpu/drm/amd/amdgpu/Makefile
index 006d49d6b4af..9c9c73b73ac8 100644
--- a/drivers/gpu/drm/amd/amdgpu/Makefile
+++ b/drivers/gpu/drm/amd/amdgpu/Makefile
@@ -86,7 +86,7 @@ amdgpu-y += \
nbio_v7_2.o hdp_v4_0.o hdp_v5_0.o aldebaran_reg_init.o aldebaran.o soc21.o soc24.o \
sienna_cichlid.o smu_v13_0_10.o nbio_v4_3.o hdp_v6_0.o nbio_v7_7.o hdp_v5_2.o lsdma_v6_0.o \
nbio_v7_9.o aqua_vanjaram.o nbio_v7_11.o lsdma_v7_0.o hdp_v7_0.o nbif_v6_3_1.o \
- cyan_skillfish_reg_init.o soc_v1_0.o
+ cyan_skillfish_reg_init.o soc_v1_0.o lsdma_v7_1.o
# add DF block
amdgpu-y += \
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
index d1bf2e150c1a..aabe9d58c3dc 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
@@ -246,10 +246,10 @@ amdgpu_connector_find_encoder(struct drm_connector *connector,
return NULL;
}
-static struct edid *
+static const struct drm_edid *
amdgpu_connector_get_hardcoded_edid(struct amdgpu_device *adev)
{
- return drm_edid_duplicate(drm_edid_raw(adev->mode_info.bios_hardcoded_edid));
+ return drm_edid_dup(adev->mode_info.bios_hardcoded_edid);
}
static void amdgpu_connector_get_edid(struct drm_connector *connector)
@@ -268,8 +268,8 @@ static void amdgpu_connector_get_edid(struct drm_connector *connector)
if ((amdgpu_connector_encoder_get_dp_bridge_encoder_id(connector) !=
ENCODER_OBJECT_ID_NONE) &&
amdgpu_connector->ddc_bus->has_aux) {
- amdgpu_connector->edid = drm_get_edid(connector,
- &amdgpu_connector->ddc_bus->aux.ddc);
+ amdgpu_connector->edid = drm_edid_read_ddc(connector,
+ &amdgpu_connector->ddc_bus->aux.ddc);
} else if ((connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort) ||
(connector->connector_type == DRM_MODE_CONNECTOR_eDP)) {
struct amdgpu_connector_atom_dig *dig = amdgpu_connector->con_priv;
@@ -277,14 +277,14 @@ static void amdgpu_connector_get_edid(struct drm_connector *connector)
if ((dig->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT ||
dig->dp_sink_type == CONNECTOR_OBJECT_ID_eDP) &&
amdgpu_connector->ddc_bus->has_aux)
- amdgpu_connector->edid = drm_get_edid(connector,
- &amdgpu_connector->ddc_bus->aux.ddc);
+ amdgpu_connector->edid = drm_edid_read_ddc(connector,
+ &amdgpu_connector->ddc_bus->aux.ddc);
else if (amdgpu_connector->ddc_bus)
- amdgpu_connector->edid = drm_get_edid(connector,
- &amdgpu_connector->ddc_bus->adapter);
+ amdgpu_connector->edid = drm_edid_read_ddc(connector,
+ &amdgpu_connector->ddc_bus->adapter);
} else if (amdgpu_connector->ddc_bus) {
- amdgpu_connector->edid = drm_get_edid(connector,
- &amdgpu_connector->ddc_bus->adapter);
+ amdgpu_connector->edid = drm_edid_read_ddc(connector,
+ &amdgpu_connector->ddc_bus->adapter);
}
if (!amdgpu_connector->edid) {
@@ -292,30 +292,22 @@ static void amdgpu_connector_get_edid(struct drm_connector *connector)
if (((connector->connector_type == DRM_MODE_CONNECTOR_LVDS) ||
(connector->connector_type == DRM_MODE_CONNECTOR_eDP))) {
amdgpu_connector->edid = amdgpu_connector_get_hardcoded_edid(adev);
- drm_connector_update_edid_property(connector, amdgpu_connector->edid);
+ drm_edid_connector_update(connector, amdgpu_connector->edid);
}
}
}
-static void amdgpu_connector_free_edid(struct drm_connector *connector)
-{
- struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
-
- kfree(amdgpu_connector->edid);
- amdgpu_connector->edid = NULL;
-}
-
static int amdgpu_connector_ddc_get_modes(struct drm_connector *connector)
{
struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
int ret;
if (amdgpu_connector->edid) {
- drm_connector_update_edid_property(connector, amdgpu_connector->edid);
- ret = drm_add_edid_modes(connector, amdgpu_connector->edid);
+ drm_edid_connector_update(connector, amdgpu_connector->edid);
+ ret = drm_edid_connector_add_modes(connector);
return ret;
}
- drm_connector_update_edid_property(connector, NULL);
+ drm_edid_connector_update(connector, NULL);
return 0;
}
@@ -754,7 +746,7 @@ static void amdgpu_connector_destroy(struct drm_connector *connector)
{
struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
- amdgpu_connector_free_edid(connector);
+ drm_edid_free(amdgpu_connector->edid);
kfree(amdgpu_connector->con_priv);
drm_connector_unregister(connector);
drm_connector_cleanup(connector);
@@ -873,7 +865,7 @@ amdgpu_connector_vga_detect(struct drm_connector *connector, bool force)
dret = amdgpu_display_ddc_probe(amdgpu_connector, false);
if (dret) {
amdgpu_connector->detected_by_load = false;
- amdgpu_connector_free_edid(connector);
+ drm_edid_free(amdgpu_connector->edid);
amdgpu_connector_get_edid(connector);
if (!amdgpu_connector->edid) {
@@ -883,13 +875,13 @@ amdgpu_connector_vga_detect(struct drm_connector *connector, bool force)
ret = connector_status_connected;
} else {
amdgpu_connector->use_digital =
- !!(amdgpu_connector->edid->input & DRM_EDID_INPUT_DIGITAL);
+ drm_edid_is_digital(amdgpu_connector->edid);
/* some oems have boards with separate digital and analog connectors
* with a shared ddc line (often vga + hdmi)
*/
if (amdgpu_connector->use_digital && amdgpu_connector->shared_ddc) {
- amdgpu_connector_free_edid(connector);
+ drm_edid_free(amdgpu_connector->edid);
ret = connector_status_disconnected;
} else {
ret = connector_status_connected;
@@ -984,7 +976,7 @@ static void amdgpu_connector_shared_ddc(enum drm_connector_status *status,
/* hpd is our only option in this case */
if (!amdgpu_display_hpd_sense(adev,
amdgpu_connector->hpd.hpd)) {
- amdgpu_connector_free_edid(connector);
+ drm_edid_free(amdgpu_connector->edid);
*status = connector_status_disconnected;
}
}
@@ -1053,7 +1045,7 @@ amdgpu_connector_dvi_detect(struct drm_connector *connector, bool force)
}
if (dret) {
amdgpu_connector->detected_by_load = false;
- amdgpu_connector_free_edid(connector);
+ drm_edid_free(amdgpu_connector->edid);
amdgpu_connector_get_edid(connector);
if (!amdgpu_connector->edid) {
@@ -1063,13 +1055,13 @@ amdgpu_connector_dvi_detect(struct drm_connector *connector, bool force)
broken_edid = true; /* defer use_digital to later */
} else {
amdgpu_connector->use_digital =
- !!(amdgpu_connector->edid->input & DRM_EDID_INPUT_DIGITAL);
+ drm_edid_is_digital(amdgpu_connector->edid);
/* some oems have boards with separate digital and analog connectors
* with a shared ddc line (often vga + hdmi)
*/
if ((!amdgpu_connector->use_digital) && amdgpu_connector->shared_ddc) {
- amdgpu_connector_free_edid(connector);
+ drm_edid_free(amdgpu_connector->edid);
ret = connector_status_disconnected;
} else {
ret = connector_status_connected;
@@ -1417,7 +1409,7 @@ amdgpu_connector_dp_detect(struct drm_connector *connector, bool force)
goto out;
}
- amdgpu_connector_free_edid(connector);
+ drm_edid_free(amdgpu_connector->edid);
if ((connector->connector_type == DRM_MODE_CONNECTOR_eDP) ||
(connector->connector_type == DRM_MODE_CONNECTOR_LVDS)) {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
index b42f866935ab..aa9239b310a3 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
@@ -39,6 +39,7 @@
#include "amdgpu_reset.h"
#include "amdgpu_psp_ta.h"
+#include "amdgpu_userq.h"
#if defined(CONFIG_DEBUG_FS)
@@ -2156,6 +2157,53 @@ static const struct file_operations amdgpu_pt_info_fops = {
.release = single_release,
};
+static int amdgpu_mqd_info_read(struct seq_file *m, void *unused)
+{
+ struct amdgpu_usermode_queue *queue = m->private;
+ struct amdgpu_bo *bo;
+ int r;
+
+ if (!queue || !queue->mqd.obj)
+ return -EINVAL;
+
+ bo = amdgpu_bo_ref(queue->mqd.obj);
+ r = amdgpu_bo_reserve(bo, true);
+ if (r) {
+ amdgpu_bo_unref(&bo);
+ return -EINVAL;
+ }
+
+ seq_printf(m, "queue_type: %d\n", queue->queue_type);
+ seq_printf(m, "mqd_gpu_address: 0x%llx\n", amdgpu_bo_gpu_offset(queue->mqd.obj));
+
+ amdgpu_bo_unreserve(bo);
+ amdgpu_bo_unref(&bo);
+
+ return 0;
+}
+
+static int amdgpu_mqd_info_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, amdgpu_mqd_info_read, inode->i_private);
+}
+
+static const struct file_operations amdgpu_mqd_info_fops = {
+ .owner = THIS_MODULE,
+ .open = amdgpu_mqd_info_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+void amdgpu_debugfs_userq_init(struct drm_file *file, struct amdgpu_usermode_queue *queue, int qid)
+{
+ char queue_name[32];
+
+ scnprintf(queue_name, sizeof(queue_name), "queue_%d", qid);
+ queue->debugfs_queue = debugfs_create_dir(queue_name, file->debugfs_client);
+ debugfs_create_file("mqd_info", 0444, queue->debugfs_queue, queue, &amdgpu_mqd_info_fops);
+}
+
void amdgpu_debugfs_vm_init(struct drm_file *file)
{
debugfs_create_file("vm_pagetable_info", 0444, file->debugfs_client, file,
@@ -2174,4 +2222,9 @@ int amdgpu_debugfs_regs_init(struct amdgpu_device *adev)
void amdgpu_debugfs_vm_init(struct drm_file *file)
{
}
+void amdgpu_debugfs_userq_init(struct drm_file *file,
+ struct amdgpu_usermode_queue *queue,
+ int qid)
+{
+}
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.h
index e7b3c38e5186..e88b4a1e564c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.h
@@ -25,6 +25,7 @@
/*
* Debugfs
*/
+struct amdgpu_usermode_queue;
int amdgpu_debugfs_regs_init(struct amdgpu_device *adev);
int amdgpu_debugfs_init(struct amdgpu_device *adev);
@@ -34,4 +35,7 @@ void amdgpu_debugfs_firmware_init(struct amdgpu_device *adev);
void amdgpu_debugfs_gem_init(struct amdgpu_device *adev);
void amdgpu_debugfs_mes_event_log_init(struct amdgpu_device *adev);
void amdgpu_debugfs_vm_init(struct drm_file *file);
+void amdgpu_debugfs_userq_init(struct drm_file *file,
+ struct amdgpu_usermode_queue *queue,
+ int qid);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_dev_coredump.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_dev_coredump.c
index c38e7371bafc..160f0704d1d3 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_dev_coredump.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_dev_coredump.c
@@ -261,6 +261,8 @@ amdgpu_devcoredump_read(char *buffer, loff_t offset, size_t count,
}
}
+ amdgpu_discovery_dump(coredump->adev, &p);
+
/* IP firmware information */
drm_printf(&p, "\nIP Firmwares\n");
amdgpu_devcoredump_fw_info(coredump->adev, &p);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index 6f6973e8cd53..711b4502653a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -1995,8 +1995,10 @@ static int amdgpu_device_ip_early_init(struct amdgpu_device *adev)
break;
default:
r = amdgpu_discovery_set_ip_blocks(adev);
- if (r)
+ if (r) {
+ adev->num_ip_blocks = 0;
return r;
+ }
break;
}
@@ -2550,6 +2552,8 @@ int amdgpu_device_set_cg_state(struct amdgpu_device *adev,
i = state == AMD_CG_STATE_GATE ? j : adev->num_ip_blocks - j - 1;
if (!adev->ip_blocks[i].status.late_initialized)
continue;
+ if (!adev->ip_blocks[i].version)
+ continue;
/* skip CG for GFX, SDMA on S0ix */
if (adev->in_s0ix &&
(adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GFX ||
@@ -2589,6 +2593,8 @@ int amdgpu_device_set_pg_state(struct amdgpu_device *adev,
i = state == AMD_PG_STATE_GATE ? j : adev->num_ip_blocks - j - 1;
if (!adev->ip_blocks[i].status.late_initialized)
continue;
+ if (!adev->ip_blocks[i].version)
+ continue;
/* skip PG for GFX, SDMA on S0ix */
if (adev->in_s0ix &&
(adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GFX ||
@@ -2796,6 +2802,8 @@ static int amdgpu_device_ip_fini_early(struct amdgpu_device *adev)
int i, r;
for (i = 0; i < adev->num_ip_blocks; i++) {
+ if (!adev->ip_blocks[i].version)
+ continue;
if (!adev->ip_blocks[i].version->funcs->early_fini)
continue;
@@ -2873,6 +2881,8 @@ static int amdgpu_device_ip_fini(struct amdgpu_device *adev)
if (!adev->ip_blocks[i].status.sw)
continue;
+ if (!adev->ip_blocks[i].version)
+ continue;
if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) {
amdgpu_ucode_free_bo(adev);
amdgpu_free_static_csa(&adev->virt.csa_obj);
@@ -2899,6 +2909,8 @@ static int amdgpu_device_ip_fini(struct amdgpu_device *adev)
for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
if (!adev->ip_blocks[i].status.late_initialized)
continue;
+ if (!adev->ip_blocks[i].version)
+ continue;
if (adev->ip_blocks[i].version->funcs->late_fini)
adev->ip_blocks[i].version->funcs->late_fini(&adev->ip_blocks[i]);
adev->ip_blocks[i].status.late_initialized = false;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c
index 6c8b3c2687dc..f9f785c5d8ac 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c
@@ -87,6 +87,7 @@
#include "sdma_v7_1.h"
#include "lsdma_v6_0.h"
#include "lsdma_v7_0.h"
+#include "lsdma_v7_1.h"
#include "vcn_v2_0.h"
#include "jpeg_v2_0.h"
#include "vcn_v3_0.h"
@@ -132,6 +133,7 @@ MODULE_FIRMWARE("amdgpu/picasso_ip_discovery.bin");
MODULE_FIRMWARE("amdgpu/arcturus_ip_discovery.bin");
MODULE_FIRMWARE("amdgpu/aldebaran_ip_discovery.bin");
+/* Note: These registers are consistent across all SoCs */
#define mmIP_DISCOVERY_VERSION 0x16A00
#define mmRCC_CONFIG_MEMSIZE 0xde3
#define mmMP0_SMN_C2PMSG_33 0x16061
@@ -139,6 +141,10 @@ MODULE_FIRMWARE("amdgpu/aldebaran_ip_discovery.bin");
#define mmMM_INDEX_HI 0x6
#define mmMM_DATA 0x1
+#define mmDRIVER_SCRATCH_0 0x94
+#define mmDRIVER_SCRATCH_1 0x95
+#define mmDRIVER_SCRATCH_2 0x96
+
static const char *hw_id_names[HW_ID_MAX] = {
[MP1_HWID] = "MP1",
[MP2_HWID] = "MP2",
@@ -253,39 +259,12 @@ static int hw_id_map[MAX_HWIP] = {
[ATU_HWIP] = ATU_HWID,
};
-static int amdgpu_discovery_read_binary_from_sysmem(struct amdgpu_device *adev, uint8_t *binary)
+static int amdgpu_discovery_get_tmr_info(struct amdgpu_device *adev,
+ bool *is_tmr_in_sysmem)
{
- u64 tmr_offset, tmr_size, pos;
- void *discv_regn;
- int ret;
-
- ret = amdgpu_acpi_get_tmr_info(adev, &tmr_offset, &tmr_size);
- if (ret)
- return ret;
-
- pos = tmr_offset + tmr_size - DISCOVERY_TMR_OFFSET;
-
- /* This region is read-only and reserved from system use */
- discv_regn = memremap(pos, adev->discovery.size, MEMREMAP_WC);
- if (discv_regn) {
- memcpy(binary, discv_regn, adev->discovery.size);
- memunmap(discv_regn);
- return 0;
- }
-
- return -ENOENT;
-}
-
-#define IP_DISCOVERY_V2 2
-#define IP_DISCOVERY_V4 4
-
-static int amdgpu_discovery_read_binary_from_mem(struct amdgpu_device *adev,
- uint8_t *binary)
-{
- bool sz_valid = true;
- uint64_t vram_size;
- int i, ret = 0;
- u32 msg;
+ u64 vram_size, tmr_offset, tmr_size;
+ u32 msg, tmr_offset_lo, tmr_offset_hi;
+ int i, ret;
if (!amdgpu_sriov_vf(adev)) {
/* It can take up to two second for IFWI init to complete on some dGPUs,
@@ -305,51 +284,98 @@ static int amdgpu_discovery_read_binary_from_mem(struct amdgpu_device *adev,
}
vram_size = RREG32(mmRCC_CONFIG_MEMSIZE);
- if (!vram_size || vram_size == U32_MAX)
- sz_valid = false;
+ if (vram_size == U32_MAX)
+ return -ENXIO;
+ else if (!vram_size)
+ *is_tmr_in_sysmem = true;
else
- vram_size <<= 20;
+ *is_tmr_in_sysmem = false;
- /*
- * If in VRAM, discovery TMR is marked for reservation. If it is in system mem,
- * then it is not required to be reserved.
- */
- if (sz_valid) {
- if (amdgpu_sriov_vf(adev) && adev->virt.is_dynamic_crit_regn_enabled) {
- /* For SRIOV VFs with dynamic critical region enabled,
- * we will get the IPD binary via below call.
- * If dynamic critical is disabled, fall through to normal seq.
- */
- if (amdgpu_virt_get_dynamic_data_info(adev,
- AMD_SRIOV_MSG_IPD_TABLE_ID, binary,
- &adev->discovery.size)) {
- dev_err(adev->dev,
- "failed to read discovery info from dynamic critical region.");
- ret = -EINVAL;
- goto exit;
- }
- } else {
- uint64_t pos = vram_size - DISCOVERY_TMR_OFFSET;
+ /* init the default tmr size and offset */
+ adev->discovery.size = DISCOVERY_TMR_SIZE;
+ if (vram_size)
+ adev->discovery.offset = (vram_size << 20) - DISCOVERY_TMR_OFFSET;
+
+ if (amdgpu_sriov_vf(adev) && adev->virt.is_dynamic_crit_regn_enabled) {
+ adev->discovery.offset =
+ adev->virt.crit_regn_tbl[AMD_SRIOV_MSG_IPD_TABLE_ID].offset;
+ adev->discovery.size =
+ adev->virt.crit_regn_tbl[AMD_SRIOV_MSG_IPD_TABLE_ID].size_kb << 10;
+ if (!adev->discovery.offset || !adev->discovery.size)
+ return -EINVAL;
+ } else {
+ tmr_size = RREG32(mmDRIVER_SCRATCH_2);
+ if (tmr_size) {
+ /* It's preferred to transition to PSP mailbox reg interface
+ * for both bare-metal and passthrough if available */
+ adev->discovery.size = (u32)tmr_size;
+ tmr_offset_lo = RREG32(mmDRIVER_SCRATCH_0);
+ tmr_offset_hi = RREG32(mmDRIVER_SCRATCH_1);
+ adev->discovery.offset = ((u64)le32_to_cpu(tmr_offset_hi) << 32 |
+ le32_to_cpu(tmr_offset_lo));
+ } else if (!vram_size) {
+ /* fall back to apci approach to query tmr offset if vram_size is 0 */
+ ret = amdgpu_acpi_get_tmr_info(adev, &tmr_offset, &tmr_size);
+ if (ret)
+ return ret;
+ adev->discovery.size = (u32)tmr_size;
+ adev->discovery.offset = tmr_offset + tmr_size - DISCOVERY_TMR_OFFSET;
+ }
+ }
- amdgpu_device_vram_access(adev, pos, (uint32_t *)binary,
- adev->discovery.size, false);
+ adev->discovery.bin = kzalloc(adev->discovery.size, GFP_KERNEL);
+ if (!adev->discovery.bin)
+ return -ENOMEM;
+ adev->discovery.debugfs_blob.data = adev->discovery.bin;
+ adev->discovery.debugfs_blob.size = adev->discovery.size;
+
+ return 0;
+}
+
+static int amdgpu_discovery_read_binary_from_sysmem(struct amdgpu_device *adev, uint8_t *binary)
+{
+ void *discv_regn;
+
+ /* This region is read-only and reserved from system use */
+ discv_regn = memremap(adev->discovery.offset, adev->discovery.size, MEMREMAP_WC);
+ if (discv_regn) {
+ memcpy(binary, discv_regn, adev->discovery.size);
+ memunmap(discv_regn);
+ return 0;
+ }
+
+ return -ENOENT;
+}
+
+#define IP_DISCOVERY_V2 2
+#define IP_DISCOVERY_V4 4
+
+static int amdgpu_discovery_read_binary_from_mem(struct amdgpu_device *adev,
+ uint8_t *binary,
+ bool is_tmr_in_sysmem)
+{
+ int ret = 0;
+
+ if (!is_tmr_in_sysmem) {
+ if (amdgpu_sriov_vf(adev) &&
+ amdgpu_sriov_xgmi_connected_to_cpu(adev)) {
+ ret = amdgpu_discovery_read_binary_from_sysmem(adev, binary);
+ } else {
+ amdgpu_device_vram_access(adev, adev->discovery.offset,
+ (uint32_t *)binary,
+ adev->discovery.size, false);
adev->discovery.reserve_tmr = true;
}
} else {
ret = amdgpu_discovery_read_binary_from_sysmem(adev, binary);
}
- if (ret)
- dev_err(adev->dev,
- "failed to read discovery info from memory, vram size read: %llx",
- vram_size);
-exit:
return ret;
}
static int amdgpu_discovery_read_binary_from_file(struct amdgpu_device *adev,
- uint8_t *binary,
- const char *fw_name)
+ uint8_t *binary,
+ const char *fw_name)
{
const struct firmware *fw;
int r;
@@ -431,14 +457,12 @@ static void amdgpu_discovery_harvest_config_quirk(struct amdgpu_device *adev)
}
static int amdgpu_discovery_verify_npsinfo(struct amdgpu_device *adev,
- struct binary_header *bhdr)
+ struct table_info *info)
{
uint8_t *discovery_bin = adev->discovery.bin;
- struct table_info *info;
uint16_t checksum;
uint16_t offset;
- info = &bhdr->table_list[NPS_INFO];
offset = le16_to_cpu(info->offset);
checksum = le16_to_cpu(info->checksum);
@@ -491,23 +515,125 @@ static const char *amdgpu_discovery_get_fw_name(struct amdgpu_device *adev)
}
}
-static int amdgpu_discovery_init(struct amdgpu_device *adev)
+static int amdgpu_discovery_get_table_info(struct amdgpu_device *adev,
+ struct table_info **info,
+ uint16_t table_id)
+{
+ struct binary_header *bhdr =
+ (struct binary_header *)adev->discovery.bin;
+ struct binary_header_v2 *bhdrv2;
+
+ switch (bhdr->version_major) {
+ case 2:
+ bhdrv2 = (struct binary_header_v2 *)adev->discovery.bin;
+ *info = &bhdrv2->table_list[table_id];
+ break;
+ case 1:
+ *info = &bhdr->table_list[table_id];
+ break;
+ default:
+ dev_err(adev->dev, "Invalid ip discovery table version\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int amdgpu_discovery_table_check(struct amdgpu_device *adev,
+ uint8_t *discovery_bin,
+ uint16_t table_id)
{
+ int r, act_val, exp_val, table_size;
+ uint16_t offset, checksum;
struct table_info *info;
+ bool check_table = true;
+ char *table_name;
+
+ r = amdgpu_discovery_get_table_info(adev, &info, table_id);
+ if (r)
+ return r;
+ offset = le16_to_cpu(info->offset);
+ checksum = le16_to_cpu(info->checksum);
+
+ switch (table_id) {
+ case IP_DISCOVERY:
+ struct ip_discovery_header *ihdr =
+ (struct ip_discovery_header *)(discovery_bin + offset);
+ act_val = le32_to_cpu(ihdr->signature);
+ exp_val = DISCOVERY_TABLE_SIGNATURE;
+ table_size = le16_to_cpu(ihdr->size);
+ table_name = "data table";
+ break;
+ case GC:
+ struct gpu_info_header *ghdr =
+ (struct gpu_info_header *)(discovery_bin + offset);
+ act_val = le32_to_cpu(ghdr->table_id);
+ exp_val = GC_TABLE_ID;
+ table_size = le16_to_cpu(ghdr->size);
+ table_name = "gc table";
+ break;
+ case HARVEST_INFO:
+ struct harvest_info_header *hhdr =
+ (struct harvest_info_header *)(discovery_bin + offset);
+ act_val = le32_to_cpu(hhdr->signature);
+ exp_val = HARVEST_TABLE_SIGNATURE;
+ table_size = sizeof(struct harvest_table);
+ table_name = "harvest table";
+ break;
+ case VCN_INFO:
+ struct vcn_info_header *vhdr =
+ (struct vcn_info_header *)(discovery_bin + offset);
+ act_val = le32_to_cpu(vhdr->table_id);
+ exp_val = VCN_INFO_TABLE_ID;
+ table_size = le32_to_cpu(vhdr->size_bytes);
+ table_name = "vcn table";
+ break;
+ case MALL_INFO:
+ struct mall_info_header *mhdr =
+ (struct mall_info_header *)(discovery_bin + offset);
+ act_val = le32_to_cpu(mhdr->table_id);
+ exp_val = MALL_INFO_TABLE_ID;
+ table_size = le32_to_cpu(mhdr->size_bytes);
+ table_name = "mall table";
+ check_table = false;
+ break;
+ default:
+ dev_err(adev->dev, "invalid ip discovery table id %d specified\n", table_id);
+ check_table = false;
+ break;
+ }
+
+ if (check_table && offset) {
+ if (act_val != exp_val) {
+ dev_err(adev->dev, "invalid ip discovery %s signature\n", table_name);
+ return -EINVAL;
+ }
+
+ if (!amdgpu_discovery_verify_checksum(adev, discovery_bin + offset,
+ table_size, checksum)) {
+ dev_err(adev->dev, "invalid ip discovery %s checksum\n", table_name);
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
+static int amdgpu_discovery_init(struct amdgpu_device *adev)
+{
struct binary_header *bhdr;
uint8_t *discovery_bin;
const char *fw_name;
uint16_t offset;
uint16_t size;
uint16_t checksum;
+ uint16_t table_id;
+ bool is_tmr_in_sysmem;
int r;
- adev->discovery.bin = kzalloc(DISCOVERY_TMR_SIZE, GFP_KERNEL);
- if (!adev->discovery.bin)
- return -ENOMEM;
- adev->discovery.size = DISCOVERY_TMR_SIZE;
- adev->discovery.debugfs_blob.data = adev->discovery.bin;
- adev->discovery.debugfs_blob.size = adev->discovery.size;
+ r = amdgpu_discovery_get_tmr_info(adev, &is_tmr_in_sysmem);
+ if (r)
+ return r;
discovery_bin = adev->discovery.bin;
/* Read from file if it is the preferred option */
@@ -520,7 +646,8 @@ static int amdgpu_discovery_init(struct amdgpu_device *adev)
goto out;
} else {
drm_dbg(&adev->ddev, "use ip discovery information from memory");
- r = amdgpu_discovery_read_binary_from_mem(adev, discovery_bin);
+ r = amdgpu_discovery_read_binary_from_mem(adev, discovery_bin,
+ is_tmr_in_sysmem);
if (r)
goto out;
}
@@ -547,118 +674,10 @@ static int amdgpu_discovery_init(struct amdgpu_device *adev)
goto out;
}
- info = &bhdr->table_list[IP_DISCOVERY];
- offset = le16_to_cpu(info->offset);
- checksum = le16_to_cpu(info->checksum);
-
- if (offset) {
- struct ip_discovery_header *ihdr =
- (struct ip_discovery_header *)(discovery_bin + offset);
- if (le32_to_cpu(ihdr->signature) != DISCOVERY_TABLE_SIGNATURE) {
- dev_err(adev->dev, "invalid ip discovery data table signature\n");
- r = -EINVAL;
- goto out;
- }
-
- if (!amdgpu_discovery_verify_checksum(adev, discovery_bin + offset,
- le16_to_cpu(ihdr->size),
- checksum)) {
- dev_err(adev->dev, "invalid ip discovery data table checksum\n");
- r = -EINVAL;
- goto out;
- }
- }
-
- info = &bhdr->table_list[GC];
- offset = le16_to_cpu(info->offset);
- checksum = le16_to_cpu(info->checksum);
-
- if (offset) {
- struct gpu_info_header *ghdr =
- (struct gpu_info_header *)(discovery_bin + offset);
-
- if (le32_to_cpu(ghdr->table_id) != GC_TABLE_ID) {
- dev_err(adev->dev, "invalid ip discovery gc table id\n");
- r = -EINVAL;
- goto out;
- }
-
- if (!amdgpu_discovery_verify_checksum(adev, discovery_bin + offset,
- le32_to_cpu(ghdr->size),
- checksum)) {
- dev_err(adev->dev, "invalid gc data table checksum\n");
- r = -EINVAL;
- goto out;
- }
- }
-
- info = &bhdr->table_list[HARVEST_INFO];
- offset = le16_to_cpu(info->offset);
- checksum = le16_to_cpu(info->checksum);
-
- if (offset) {
- struct harvest_info_header *hhdr =
- (struct harvest_info_header *)(discovery_bin + offset);
-
- if (le32_to_cpu(hhdr->signature) != HARVEST_TABLE_SIGNATURE) {
- dev_err(adev->dev, "invalid ip discovery harvest table signature\n");
- r = -EINVAL;
- goto out;
- }
-
- if (!amdgpu_discovery_verify_checksum(adev,
- discovery_bin + offset,
- sizeof(struct harvest_table), checksum)) {
- dev_err(adev->dev, "invalid harvest data table checksum\n");
- r = -EINVAL;
- goto out;
- }
- }
-
- info = &bhdr->table_list[VCN_INFO];
- offset = le16_to_cpu(info->offset);
- checksum = le16_to_cpu(info->checksum);
-
- if (offset) {
- struct vcn_info_header *vhdr =
- (struct vcn_info_header *)(discovery_bin + offset);
-
- if (le32_to_cpu(vhdr->table_id) != VCN_INFO_TABLE_ID) {
- dev_err(adev->dev, "invalid ip discovery vcn table id\n");
- r = -EINVAL;
- goto out;
- }
-
- if (!amdgpu_discovery_verify_checksum(adev,
- discovery_bin + offset,
- le32_to_cpu(vhdr->size_bytes), checksum)) {
- dev_err(adev->dev, "invalid vcn data table checksum\n");
- r = -EINVAL;
- goto out;
- }
- }
-
- info = &bhdr->table_list[MALL_INFO];
- offset = le16_to_cpu(info->offset);
- checksum = le16_to_cpu(info->checksum);
-
- if (0 && offset) {
- struct mall_info_header *mhdr =
- (struct mall_info_header *)(discovery_bin + offset);
-
- if (le32_to_cpu(mhdr->table_id) != MALL_INFO_TABLE_ID) {
- dev_err(adev->dev, "invalid ip discovery mall table id\n");
- r = -EINVAL;
- goto out;
- }
-
- if (!amdgpu_discovery_verify_checksum(adev,
- discovery_bin + offset,
- le32_to_cpu(mhdr->size_bytes), checksum)) {
- dev_err(adev->dev, "invalid mall data table checksum\n");
- r = -EINVAL;
+ for (table_id = 0; table_id <= MALL_INFO; table_id++) {
+ r = amdgpu_discovery_table_check(adev, discovery_bin, table_id);
+ if (r)
goto out;
- }
}
return 0;
@@ -770,14 +789,15 @@ static void amdgpu_discovery_read_from_harvest_table(struct amdgpu_device *adev,
uint32_t *umc_harvest_count)
{
uint8_t *discovery_bin = adev->discovery.bin;
- struct binary_header *bhdr;
+ struct table_info *info;
struct harvest_table *harvest_info;
u16 offset;
int i;
- uint32_t umc_harvest_config = 0;
+ u64 umc_harvest_config = 0;
- bhdr = (struct binary_header *)discovery_bin;
- offset = le16_to_cpu(bhdr->table_list[HARVEST_INFO].offset);
+ if (amdgpu_discovery_get_table_info(adev, &info, HARVEST_INFO))
+ return;
+ offset = le16_to_cpu(info->offset);
if (!offset) {
dev_err(adev->dev, "invalid harvest table offset\n");
@@ -830,7 +850,7 @@ static void amdgpu_discovery_read_from_harvest_table(struct amdgpu_device *adev,
}
}
- adev->umc.active_mask = ((1 << adev->umc.node_inst_num) - 1) &
+ adev->umc.active_mask = ((1ULL << adev->umc.node_inst_num) - 1ULL) &
~umc_harvest_config;
}
@@ -1195,13 +1215,8 @@ static int amdgpu_discovery_sysfs_ips(struct amdgpu_device *adev,
ip_hw_instance->num_instance);
ip_hw_instance->num_base_addresses = ip->num_base_address;
- for (kk = 0; kk < ip_hw_instance->num_base_addresses; kk++) {
- if (reg_base_64)
- ip_hw_instance->base_addr[kk] =
- lower_32_bits(le64_to_cpu(ip->base_address_64[kk])) & 0x3FFFFFFF;
- else
- ip_hw_instance->base_addr[kk] = ip->base_address[kk];
- }
+ for (kk = 0; kk < ip_hw_instance->num_base_addresses; kk++)
+ ip_hw_instance->base_addr[kk] = ip->base_address[kk];
kobject_init(&ip_hw_instance->kobj, &ip_hw_instance_ktype);
ip_hw_instance->kobj.kset = &ip_hw_id->hw_id_kset;
@@ -1224,7 +1239,7 @@ static int amdgpu_discovery_sysfs_recurse(struct amdgpu_device *adev)
{
struct ip_discovery_top *ip_top = adev->discovery.ip_top;
uint8_t *discovery_bin = adev->discovery.bin;
- struct binary_header *bhdr;
+ struct table_info *info;
struct ip_discovery_header *ihdr;
struct die_header *dhdr;
struct kset *die_kset = &ip_top->die_kset;
@@ -1232,10 +1247,12 @@ static int amdgpu_discovery_sysfs_recurse(struct amdgpu_device *adev)
size_t ip_offset;
int ii, res;
- bhdr = (struct binary_header *)discovery_bin;
+ res = amdgpu_discovery_get_table_info(adev, &info, IP_DISCOVERY);
+ if (res)
+ return res;
ihdr = (struct ip_discovery_header
*)(discovery_bin +
- le16_to_cpu(bhdr->table_list[IP_DISCOVERY].offset));
+ le16_to_cpu(info->offset));
num_dies = le16_to_cpu(ihdr->num_dies);
DRM_DEBUG("number of dies: %d\n", num_dies);
@@ -1379,12 +1396,54 @@ static void amdgpu_discovery_sysfs_fini(struct amdgpu_device *adev)
kobject_put(&ip_top->kobj);
}
+/* devcoredump support */
+void amdgpu_discovery_dump(struct amdgpu_device *adev, struct drm_printer *p)
+{
+ struct ip_discovery_top *ip_top = adev->discovery.ip_top;
+ struct ip_die_entry *ip_die_entry;
+ struct list_head *el_die, *el_hw_id, *el_hw_inst;
+ struct ip_hw_id *hw_id;
+ struct kset *die_kset;
+ struct ip_hw_instance *ip_inst;
+ int i = 0, j;
+
+ die_kset = &ip_top->die_kset;
+
+ drm_printf(p, "\nHW IP Discovery\n");
+ spin_lock(&die_kset->list_lock);
+ list_for_each(el_die, &die_kset->list) {
+ drm_printf(p, "die %d\n", i++);
+ ip_die_entry = to_ip_die_entry(list_to_kobj(el_die));
+
+ list_for_each(el_hw_id, &ip_die_entry->ip_kset.list) {
+ hw_id = to_ip_hw_id(list_to_kobj(el_hw_id));
+ drm_printf(p, "hw_id %d %s\n", hw_id->hw_id, hw_id_names[hw_id->hw_id]);
+
+ list_for_each(el_hw_inst, &hw_id->hw_id_kset.list) {
+ ip_inst = to_ip_hw_instance(list_to_kobj(el_hw_inst));
+ drm_printf(p, "\tinstance %d\n", ip_inst->num_instance);
+ drm_printf(p, "\tmajor %d\n", ip_inst->major);
+ drm_printf(p, "\tminor %d\n", ip_inst->minor);
+ drm_printf(p, "\trevision %d\n", ip_inst->revision);
+ drm_printf(p, "\tharvest 0x%01X\n", ip_inst->harvest);
+ drm_printf(p, "\tnum_base_addresses %d\n",
+ ip_inst->num_base_addresses);
+ for (j = 0; j < ip_inst->num_base_addresses; j++)
+ drm_printf(p, "\tbase_addr[%d] 0x%08X\n",
+ j, ip_inst->base_addr[j]);
+ }
+ }
+ }
+ spin_unlock(&die_kset->list_lock);
+}
+
+
/* ================================================== */
static int amdgpu_discovery_reg_base_init(struct amdgpu_device *adev)
{
uint8_t num_base_address, subrev, variant;
- struct binary_header *bhdr;
+ struct table_info *info;
struct ip_discovery_header *ihdr;
struct die_header *dhdr;
uint8_t *discovery_bin;
@@ -1409,10 +1468,12 @@ static int amdgpu_discovery_reg_base_init(struct amdgpu_device *adev)
adev->sdma.sdma_mask = 0;
adev->vcn.inst_mask = 0;
adev->jpeg.inst_mask = 0;
- bhdr = (struct binary_header *)discovery_bin;
+ r = amdgpu_discovery_get_table_info(adev, &info, IP_DISCOVERY);
+ if (r)
+ return r;
ihdr = (struct ip_discovery_header
*)(discovery_bin +
- le16_to_cpu(bhdr->table_list[IP_DISCOVERY].offset));
+ le16_to_cpu(info->offset));
num_dies = le16_to_cpu(ihdr->num_dies);
DRM_DEBUG("number of dies: %d\n", num_dies);
@@ -1585,14 +1646,15 @@ static void amdgpu_discovery_harvest_ip(struct amdgpu_device *adev)
{
uint8_t *discovery_bin = adev->discovery.bin;
struct ip_discovery_header *ihdr;
- struct binary_header *bhdr;
+ struct table_info *info;
int vcn_harvest_count = 0;
int umc_harvest_count = 0;
- uint16_t offset, ihdr_ver;
+ uint16_t ihdr_ver;
- bhdr = (struct binary_header *)discovery_bin;
- offset = le16_to_cpu(bhdr->table_list[IP_DISCOVERY].offset);
- ihdr = (struct ip_discovery_header *)(discovery_bin + offset);
+ if (amdgpu_discovery_get_table_info(adev, &info, IP_DISCOVERY))
+ return;
+ ihdr = (struct ip_discovery_header *)(discovery_bin +
+ le16_to_cpu(info->offset));
ihdr_ver = le16_to_cpu(ihdr->version);
/*
* Harvest table does not fit Navi1x and legacy GPUs,
@@ -1640,7 +1702,7 @@ union gc_info {
static int amdgpu_discovery_get_gfx_info(struct amdgpu_device *adev)
{
uint8_t *discovery_bin = adev->discovery.bin;
- struct binary_header *bhdr;
+ struct table_info *info;
union gc_info *gc_info;
u16 offset;
@@ -1649,8 +1711,9 @@ static int amdgpu_discovery_get_gfx_info(struct amdgpu_device *adev)
return -EINVAL;
}
- bhdr = (struct binary_header *)discovery_bin;
- offset = le16_to_cpu(bhdr->table_list[GC].offset);
+ if (amdgpu_discovery_get_table_info(adev, &info, GC))
+ return -EINVAL;
+ offset = le16_to_cpu(info->offset);
if (!offset)
return 0;
@@ -1749,7 +1812,7 @@ union mall_info {
static int amdgpu_discovery_get_mall_info(struct amdgpu_device *adev)
{
uint8_t *discovery_bin = adev->discovery.bin;
- struct binary_header *bhdr;
+ struct table_info *info;
union mall_info *mall_info;
u32 u, mall_size_per_umc, m_s_present, half_use;
u64 mall_size;
@@ -1760,8 +1823,9 @@ static int amdgpu_discovery_get_mall_info(struct amdgpu_device *adev)
return -EINVAL;
}
- bhdr = (struct binary_header *)discovery_bin;
- offset = le16_to_cpu(bhdr->table_list[MALL_INFO].offset);
+ if (amdgpu_discovery_get_table_info(adev, &info, MALL_INFO))
+ return -EINVAL;
+ offset = le16_to_cpu(info->offset);
if (!offset)
return 0;
@@ -1806,7 +1870,7 @@ union vcn_info {
static int amdgpu_discovery_get_vcn_info(struct amdgpu_device *adev)
{
uint8_t *discovery_bin = adev->discovery.bin;
- struct binary_header *bhdr;
+ struct table_info *info;
union vcn_info *vcn_info;
u16 offset;
int v;
@@ -1826,8 +1890,9 @@ static int amdgpu_discovery_get_vcn_info(struct amdgpu_device *adev)
return -EINVAL;
}
- bhdr = (struct binary_header *)discovery_bin;
- offset = le16_to_cpu(bhdr->table_list[VCN_INFO].offset);
+ if (amdgpu_discovery_get_table_info(adev, &info, VCN_INFO))
+ return -EINVAL;
+ offset = le16_to_cpu(info->offset);
if (!offset)
return 0;
@@ -1864,14 +1929,26 @@ static int amdgpu_discovery_refresh_nps_info(struct amdgpu_device *adev,
uint64_t vram_size, pos, offset;
struct nps_info_header *nhdr;
struct binary_header bhdr;
+ struct binary_header_v2 bhdrv2;
uint16_t checksum;
vram_size = (uint64_t)RREG32(mmRCC_CONFIG_MEMSIZE) << 20;
pos = vram_size - DISCOVERY_TMR_OFFSET;
amdgpu_device_vram_access(adev, pos, &bhdr, sizeof(bhdr), false);
- offset = le16_to_cpu(bhdr.table_list[NPS_INFO].offset);
- checksum = le16_to_cpu(bhdr.table_list[NPS_INFO].checksum);
+ switch (bhdr.version_major) {
+ case 2:
+ amdgpu_device_vram_access(adev, pos, &bhdrv2, sizeof(bhdrv2), false);
+ offset = le16_to_cpu(bhdrv2.table_list[NPS_INFO].offset);
+ checksum = le16_to_cpu(bhdrv2.table_list[NPS_INFO].checksum);
+ break;
+ case 1:
+ offset = le16_to_cpu(bhdr.table_list[NPS_INFO].offset);
+ checksum = le16_to_cpu(bhdr.table_list[NPS_INFO].checksum);
+ break;
+ default:
+ return -EINVAL;
+ }
amdgpu_device_vram_access(adev, (pos + offset), nps_data,
sizeof(*nps_data), false);
@@ -1894,7 +1971,7 @@ int amdgpu_discovery_get_nps_info(struct amdgpu_device *adev,
{
uint8_t *discovery_bin = adev->discovery.bin;
struct amdgpu_gmc_memrange *mem_ranges;
- struct binary_header *bhdr;
+ struct table_info *info;
union nps_info *nps_info;
union nps_info nps_data;
u16 offset;
@@ -1915,14 +1992,15 @@ int amdgpu_discovery_get_nps_info(struct amdgpu_device *adev,
return -EINVAL;
}
- bhdr = (struct binary_header *)discovery_bin;
- offset = le16_to_cpu(bhdr->table_list[NPS_INFO].offset);
+ if (amdgpu_discovery_get_table_info(adev, &info, NPS_INFO))
+ return -EINVAL;
+ offset = le16_to_cpu(info->offset);
if (!offset)
return -ENOENT;
/* If verification fails, return as if NPS table doesn't exist */
- if (amdgpu_discovery_verify_npsinfo(adev, bhdr))
+ if (amdgpu_discovery_verify_npsinfo(adev, info))
return -ENOENT;
nps_info = (union nps_info *)(discovery_bin + offset);
@@ -3226,6 +3304,9 @@ int amdgpu_discovery_set_ip_blocks(struct amdgpu_device *adev)
case IP_VERSION(7, 0, 1):
adev->lsdma.funcs = &lsdma_v7_0_funcs;
break;
+ case IP_VERSION(7, 1, 0):
+ adev->lsdma.funcs = &lsdma_v7_1_funcs;
+ break;
default:
break;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.h
index 4ce04486cc31..a7aeb47887a3 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.h
@@ -30,10 +30,12 @@
#define DISCOVERY_TMR_OFFSET (64 << 10)
struct ip_discovery_top;
+struct drm_printer;
struct amdgpu_discovery_info {
struct debugfs_blob_wrapper debugfs_blob;
struct ip_discovery_top *ip_top;
+ uint64_t offset;
uint32_t size;
uint8_t *bin;
bool reserve_tmr;
@@ -47,4 +49,6 @@ int amdgpu_discovery_get_nps_info(struct amdgpu_device *adev,
struct amdgpu_gmc_memrange **ranges,
int *range_cnt, bool refresh);
+void amdgpu_discovery_dump(struct amdgpu_device *adev, struct drm_printer *p);
+
#endif /* __AMDGPU_DISCOVERY__ */
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
index bef9dce2e7ea..f5cd68542442 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
@@ -1738,21 +1738,6 @@ bool amdgpu_crtc_get_scanout_position(struct drm_crtc *crtc,
stime, etime, mode);
}
-static bool
-amdgpu_display_robj_is_fb(struct amdgpu_device *adev, struct amdgpu_bo *robj)
-{
- struct drm_device *dev = adev_to_drm(adev);
- struct drm_fb_helper *fb_helper = dev->fb_helper;
-
- if (!fb_helper || !fb_helper->buffer)
- return false;
-
- if (gem_to_amdgpu_bo(fb_helper->buffer->gem) != robj)
- return false;
-
- return true;
-}
-
int amdgpu_display_suspend_helper(struct amdgpu_device *adev)
{
struct drm_device *dev = adev_to_drm(adev);
@@ -1775,7 +1760,6 @@ int amdgpu_display_suspend_helper(struct amdgpu_device *adev)
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
struct drm_framebuffer *fb = crtc->primary->fb;
- struct amdgpu_bo *robj;
if (amdgpu_crtc->cursor_bo && !adev->enable_virtual_display) {
struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo);
@@ -1790,8 +1774,9 @@ int amdgpu_display_suspend_helper(struct amdgpu_device *adev)
if (!fb || !fb->obj[0])
continue;
- robj = gem_to_amdgpu_bo(fb->obj[0]);
- if (!amdgpu_display_robj_is_fb(adev, robj)) {
+ if (!drm_fb_helper_gem_is_fb(dev->fb_helper, fb->obj[0])) {
+ struct amdgpu_bo *robj = gem_to_amdgpu_bo(fb->obj[0]);
+
r = amdgpu_bo_reserve(robj, true);
if (r == 0) {
amdgpu_bo_unpin(robj);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h
index 0e8a52d96573..b9fdc3276e81 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h
@@ -94,6 +94,10 @@ enum amdgpu_memory_partition {
#define AMDGPU_GMC9_FAULT_SOURCE_DATA_WRITE 0x20
#define AMDGPU_GMC9_FAULT_SOURCE_DATA_EXE 0x10
+#define AMDGPU_GMC121_FAULT_SOURCE_DATA_READ 0x400000
+#define AMDGPU_GMC121_FAULT_SOURCE_DATA_WRITE 0x200000
+#define AMDGPU_GMC121_FAULT_SOURCE_DATA_EXE 0x100000
+
/*
* GMC page fault information
*/
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
index 77e2133de5cf..7f19554b9ad1 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
@@ -83,7 +83,7 @@ void amdgpu_driver_unload_kms(struct drm_device *dev)
{
struct amdgpu_device *adev = drm_to_adev(dev);
- if (adev == NULL)
+ if (adev == NULL || !adev->num_ip_blocks)
return;
amdgpu_unregister_gpu_instance(adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.h
index bcf2a067dc41..f80e3aca9c78 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.h
@@ -159,9 +159,9 @@ struct amdgpu_mes {
int hung_queue_db_array_size;
int hung_queue_hqd_info_offset;
- struct amdgpu_bo *hung_queue_db_array_gpu_obj[AMDGPU_MAX_MES_PIPES];
- uint64_t hung_queue_db_array_gpu_addr[AMDGPU_MAX_MES_PIPES];
- void *hung_queue_db_array_cpu_addr[AMDGPU_MAX_MES_PIPES];
+ struct amdgpu_bo *hung_queue_db_array_gpu_obj[AMDGPU_MAX_MES_INST_PIPES];
+ uint64_t hung_queue_db_array_gpu_addr[AMDGPU_MAX_MES_INST_PIPES];
+ void *hung_queue_db_array_cpu_addr[AMDGPU_MAX_MES_INST_PIPES];
/* cooperative dispatch */
bool enable_coop_mode;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h
index dc8d2f52c7d6..90352284c5ee 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h
@@ -368,15 +368,15 @@ struct amdgpu_mode_info {
struct drm_property *plane_ctm_property;
/**
- * @shaper_lut_property: Plane property to set pre-blending shaper LUT
- * that converts color content before 3D LUT. If
- * plane_shaper_tf_property != Identity TF, AMD color module will
+ * @plane_shaper_lut_property: Plane property to set pre-blending
+ * shaper LUT that converts color content before 3D LUT.
+ * If plane_shaper_tf_property != Identity TF, AMD color module will
* combine the user LUT values with pre-defined TF into the LUT
* parameters to be programmed.
*/
struct drm_property *plane_shaper_lut_property;
/**
- * @shaper_lut_size_property: Plane property for the size of
+ * @plane_shaper_lut_size_property: Plane property for the size of
* pre-blending shaper LUT as supported by the driver (read-only).
*/
struct drm_property *plane_shaper_lut_size_property;
@@ -400,10 +400,10 @@ struct amdgpu_mode_info {
*/
struct drm_property *plane_lut3d_property;
/**
- * @plane_degamma_lut_size_property: Plane property to define the max
- * size of 3D LUT as supported by the driver (read-only). The max size
- * is the max size of one dimension and, therefore, the max number of
- * entries for 3D LUT array is the 3D LUT size cubed;
+ * @plane_lut3d_size_property: Plane property to define the max size
+ * of 3D LUT as supported by the driver (read-only). The max size is
+ * the max size of one dimension and, therefore, the max number of
+ * entries for 3D LUT array is the 3D LUT size cubed.
*/
struct drm_property *plane_lut3d_size_property;
/**
@@ -624,7 +624,7 @@ struct amdgpu_connector {
bool use_digital;
/* we need to mind the EDID between detect
and get modes due to analog/digital/tvencoder */
- struct edid *edid;
+ const struct drm_edid *edid;
void *con_priv;
bool dac_load_detect;
bool detected_by_load; /* if the connection status was determined by load */
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
index 27b67da9fdac..d39b695cd925 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
@@ -3096,6 +3096,13 @@ static int psp_load_non_psp_fw(struct psp_context *psp)
*/
continue;
+ /* IMU ucode is part of IFWI and MP0 15.0.8 would load it */
+ if (amdgpu_ip_version(adev, MP0_HWIP, 0) ==
+ IP_VERSION(15, 0, 8) &&
+ (ucode->ucode_id == AMDGPU_UCODE_ID_IMU_I ||
+ ucode->ucode_id == AMDGPU_UCODE_ID_IMU_D))
+ continue;
+
psp_print_fw_hdr(psp, ucode);
ret = psp_execute_ip_fw_load(psp, ucode);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c
index 1ab61e7b35db..82333aeb4453 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c
@@ -291,22 +291,22 @@ void amdgpu_ucode_print_rlc_hdr(const struct common_firmware_header *hdr)
break;
case 5:
/* rlc_hdr v2_5 */
- DRM_INFO("rlc_iram_ucode_size_bytes: %u\n",
+ DRM_DEBUG("rlc_iram_ucode_size_bytes: %u\n",
le32_to_cpu(rlc_hdr_v2_5->v2_2.rlc_iram_ucode_size_bytes));
- DRM_INFO("rlc_iram_ucode_offset_bytes: %u\n",
+ DRM_DEBUG("rlc_iram_ucode_offset_bytes: %u\n",
le32_to_cpu(rlc_hdr_v2_5->v2_2.rlc_iram_ucode_offset_bytes));
- DRM_INFO("rlc_dram_ucode_size_bytes: %u\n",
+ DRM_DEBUG("rlc_dram_ucode_size_bytes: %u\n",
le32_to_cpu(rlc_hdr_v2_5->v2_2.rlc_dram_ucode_size_bytes));
- DRM_INFO("rlc_dram_ucode_offset_bytes: %u\n",
+ DRM_DEBUG("rlc_dram_ucode_offset_bytes: %u\n",
le32_to_cpu(rlc_hdr_v2_5->v2_2.rlc_dram_ucode_offset_bytes));
/* rlc_hdr v2_5 */
- DRM_INFO("rlc_1_iram_ucode_size_bytes: %u\n",
+ DRM_DEBUG("rlc_1_iram_ucode_size_bytes: %u\n",
le32_to_cpu(rlc_hdr_v2_5->rlc_1_iram_ucode_size_bytes));
- DRM_INFO("rlc_1_iram_ucode_offset_bytes: %u\n",
+ DRM_DEBUG("rlc_1_iram_ucode_offset_bytes: %u\n",
le32_to_cpu(rlc_hdr_v2_5->rlc_1_iram_ucode_offset_bytes));
- DRM_INFO("rlc_1_dram_ucode_size_bytes: %u\n",
+ DRM_DEBUG("rlc_1_dram_ucode_size_bytes: %u\n",
le32_to_cpu(rlc_hdr_v2_5->rlc_1_dram_ucode_size_bytes));
- DRM_INFO("rlc_1_dram_ucode_offset_bytes: %u\n",
+ DRM_DEBUG("rlc_1_dram_ucode_offset_bytes: %u\n",
le32_to_cpu(rlc_hdr_v2_5->rlc_1_dram_ucode_offset_bytes));
break;
default:
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_userq.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_userq.c
index 7c450350847d..6d964a6ee349 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_userq.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_userq.c
@@ -709,46 +709,6 @@ static int amdgpu_userq_priority_permit(struct drm_file *filp,
return -EACCES;
}
-#if defined(CONFIG_DEBUG_FS)
-static int amdgpu_mqd_info_read(struct seq_file *m, void *unused)
-{
- struct amdgpu_usermode_queue *queue = m->private;
- struct amdgpu_bo *bo;
- int r;
-
- if (!queue || !queue->mqd.obj)
- return -EINVAL;
-
- bo = amdgpu_bo_ref(queue->mqd.obj);
- r = amdgpu_bo_reserve(bo, true);
- if (r) {
- amdgpu_bo_unref(&bo);
- return -EINVAL;
- }
-
- seq_printf(m, "queue_type: %d\n", queue->queue_type);
- seq_printf(m, "mqd_gpu_address: 0x%llx\n", amdgpu_bo_gpu_offset(queue->mqd.obj));
-
- amdgpu_bo_unreserve(bo);
- amdgpu_bo_unref(&bo);
-
- return 0;
-}
-
-static int amdgpu_mqd_info_open(struct inode *inode, struct file *file)
-{
- return single_open(file, amdgpu_mqd_info_read, inode->i_private);
-}
-
-static const struct file_operations amdgpu_mqd_info_fops = {
- .owner = THIS_MODULE,
- .open = amdgpu_mqd_info_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
-};
-#endif
-
static int
amdgpu_userq_create(struct drm_file *filp, union drm_amdgpu_userq *args)
{
@@ -758,7 +718,6 @@ amdgpu_userq_create(struct drm_file *filp, union drm_amdgpu_userq *args)
const struct amdgpu_userq_funcs *uq_funcs;
struct amdgpu_usermode_queue *queue;
struct amdgpu_db_info db_info;
- char *queue_name;
bool skip_map_queue;
u32 qid;
uint64_t index;
@@ -819,17 +778,15 @@ amdgpu_userq_create(struct drm_file *filp, union drm_amdgpu_userq *args)
amdgpu_userq_input_va_validate(adev, queue, args->in.rptr_va, AMDGPU_GPU_PAGE_SIZE) ||
amdgpu_userq_input_va_validate(adev, queue, args->in.wptr_va, AMDGPU_GPU_PAGE_SIZE)) {
r = -EINVAL;
- kfree(queue);
- goto unlock;
+ goto free_queue;
}
/* Convert relative doorbell offset into absolute doorbell index */
index = amdgpu_userq_get_doorbell_index(uq_mgr, &db_info, filp);
if (index == (uint64_t)-EINVAL) {
drm_file_err(uq_mgr->file, "Failed to get doorbell for queue\n");
- kfree(queue);
r = -EINVAL;
- goto unlock;
+ goto free_queue;
}
queue->doorbell_index = index;
@@ -837,42 +794,15 @@ amdgpu_userq_create(struct drm_file *filp, union drm_amdgpu_userq *args)
r = amdgpu_userq_fence_driver_alloc(adev, queue);
if (r) {
drm_file_err(uq_mgr->file, "Failed to alloc fence driver\n");
- goto unlock;
+ goto free_queue;
}
r = uq_funcs->mqd_create(queue, &args->in);
if (r) {
drm_file_err(uq_mgr->file, "Failed to create Queue\n");
- amdgpu_userq_fence_driver_free(queue);
- kfree(queue);
- goto unlock;
- }
-
- /* drop this refcount during queue destroy */
- kref_init(&queue->refcount);
-
- /* Wait for mode-1 reset to complete */
- down_read(&adev->reset_domain->sem);
- r = xa_err(xa_store_irq(&adev->userq_doorbell_xa, index, queue, GFP_KERNEL));
- if (r) {
- kfree(queue);
- up_read(&adev->reset_domain->sem);
- goto unlock;
+ goto clean_fence_driver;
}
- r = xa_alloc(&uq_mgr->userq_xa, &qid, queue,
- XA_LIMIT(1, AMDGPU_MAX_USERQ_COUNT), GFP_KERNEL);
- if (r) {
- drm_file_err(uq_mgr->file, "Failed to allocate a queue id\n");
- amdgpu_userq_fence_driver_free(queue);
- uq_funcs->mqd_destroy(queue);
- kfree(queue);
- r = -ENOMEM;
- up_read(&adev->reset_domain->sem);
- goto unlock;
- }
- up_read(&adev->reset_domain->sem);
-
/* don't map the queue if scheduling is halted */
if (adev->userq_halt_for_enforce_isolation &&
((queue->queue_type == AMDGPU_HW_IP_GFX) ||
@@ -884,31 +814,52 @@ amdgpu_userq_create(struct drm_file *filp, union drm_amdgpu_userq *args)
r = amdgpu_userq_map_helper(queue);
if (r) {
drm_file_err(uq_mgr->file, "Failed to map Queue\n");
- xa_erase(&uq_mgr->userq_xa, qid);
- amdgpu_userq_fence_driver_free(queue);
- uq_funcs->mqd_destroy(queue);
- kfree(queue);
- goto unlock;
+ down_read(&adev->reset_domain->sem);
+ goto clean_mqd;
}
}
- queue_name = kasprintf(GFP_KERNEL, "queue-%d", qid);
- if (!queue_name) {
+ /* drop this refcount during queue destroy */
+ kref_init(&queue->refcount);
+
+ /* Wait for mode-1 reset to complete */
+ down_read(&adev->reset_domain->sem);
+
+ r = xa_alloc(&uq_mgr->userq_xa, &qid, queue,
+ XA_LIMIT(1, AMDGPU_MAX_USERQ_COUNT), GFP_KERNEL);
+ if (r) {
+ if (!skip_map_queue)
+ amdgpu_userq_unmap_helper(queue);
+
r = -ENOMEM;
- goto unlock;
+ goto clean_mqd;
}
-#if defined(CONFIG_DEBUG_FS)
- /* Queue dentry per client to hold MQD information */
- queue->debugfs_queue = debugfs_create_dir(queue_name, filp->debugfs_client);
- debugfs_create_file("mqd_info", 0444, queue->debugfs_queue, queue, &amdgpu_mqd_info_fops);
-#endif
+ r = xa_err(xa_store_irq(&adev->userq_doorbell_xa, index, queue, GFP_KERNEL));
+ if (r) {
+ xa_erase(&uq_mgr->userq_xa, qid);
+ if (!skip_map_queue)
+ amdgpu_userq_unmap_helper(queue);
+
+ goto clean_mqd;
+ }
+ up_read(&adev->reset_domain->sem);
+
+ amdgpu_debugfs_userq_init(filp, queue, qid);
amdgpu_userq_init_hang_detect_work(queue);
- kfree(queue_name);
args->out.queue_id = qid;
atomic_inc(&uq_mgr->userq_count[queue->queue_type]);
+ mutex_unlock(&uq_mgr->userq_mutex);
+ return 0;
+clean_mqd:
+ uq_funcs->mqd_destroy(queue);
+ up_read(&adev->reset_domain->sem);
+clean_fence_driver:
+ amdgpu_userq_fence_driver_free(queue);
+free_queue:
+ kfree(queue);
unlock:
mutex_unlock(&uq_mgr->userq_mutex);
@@ -1089,12 +1040,12 @@ amdgpu_userq_bo_validate(struct amdgpu_device *adev, struct drm_exec *exec,
struct amdgpu_bo *bo;
int ret;
- spin_lock(&vm->status_lock);
+ spin_lock(&vm->invalidated_lock);
while (!list_empty(&vm->invalidated)) {
bo_va = list_first_entry(&vm->invalidated,
struct amdgpu_bo_va,
base.vm_status);
- spin_unlock(&vm->status_lock);
+ spin_unlock(&vm->invalidated_lock);
bo = bo_va->base.bo;
ret = drm_exec_prepare_obj(exec, &bo->tbo.base, 2);
@@ -1111,9 +1062,9 @@ amdgpu_userq_bo_validate(struct amdgpu_device *adev, struct drm_exec *exec,
if (ret)
return ret;
- spin_lock(&vm->status_lock);
+ spin_lock(&vm->invalidated_lock);
}
- spin_unlock(&vm->status_lock);
+ spin_unlock(&vm->invalidated_lock);
return 0;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h
index 886fbce0bfd1..9da0c6e9b869 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h
@@ -162,6 +162,7 @@ enum AMDGIM_FEATURE_FLAG {
AMDGIM_FEATURE_RAS_TELEMETRY = (1 << 10),
AMDGIM_FEATURE_RAS_CPER = (1 << 11),
AMDGIM_FEATURE_XGMI_TA_EXT_PEER_LINK = (1 << 12),
+ AMDGIM_FEATURE_XGMI_CONNECTED_TO_CPU = (1 << 13),
};
enum AMDGIM_REG_ACCESS_FLAG {
@@ -412,6 +413,9 @@ struct amdgpu_video_codec_info;
#define amdgpu_sriov_xgmi_ta_ext_peer_link_en(adev) \
((adev)->virt.gim_feature & AMDGIM_FEATURE_XGMI_TA_EXT_PEER_LINK)
+#define amdgpu_sriov_xgmi_connected_to_cpu(adev) \
+((adev)->virt.gim_feature & AMDGIM_FEATURE_XGMI_CONNECTED_TO_CPU)
+
static inline bool is_virtual_machine(void)
{
#if defined(CONFIG_X86)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index dcd49b0fb6e0..76248a0276ef 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -153,12 +153,10 @@ static void amdgpu_vm_bo_evicted(struct amdgpu_vm_bo_base *vm_bo)
vm_bo->moved = true;
amdgpu_vm_assert_locked(vm);
- spin_lock(&vm_bo->vm->status_lock);
if (bo->tbo.type == ttm_bo_type_kernel)
list_move(&vm_bo->vm_status, &vm->evicted);
else
list_move_tail(&vm_bo->vm_status, &vm->evicted);
- spin_unlock(&vm_bo->vm->status_lock);
}
/**
* amdgpu_vm_bo_moved - vm_bo is moved
@@ -171,9 +169,7 @@ static void amdgpu_vm_bo_evicted(struct amdgpu_vm_bo_base *vm_bo)
static void amdgpu_vm_bo_moved(struct amdgpu_vm_bo_base *vm_bo)
{
amdgpu_vm_assert_locked(vm_bo->vm);
- spin_lock(&vm_bo->vm->status_lock);
list_move(&vm_bo->vm_status, &vm_bo->vm->moved);
- spin_unlock(&vm_bo->vm->status_lock);
}
/**
@@ -187,9 +183,7 @@ static void amdgpu_vm_bo_moved(struct amdgpu_vm_bo_base *vm_bo)
static void amdgpu_vm_bo_idle(struct amdgpu_vm_bo_base *vm_bo)
{
amdgpu_vm_assert_locked(vm_bo->vm);
- spin_lock(&vm_bo->vm->status_lock);
list_move(&vm_bo->vm_status, &vm_bo->vm->idle);
- spin_unlock(&vm_bo->vm->status_lock);
vm_bo->moved = false;
}
@@ -203,9 +197,9 @@ static void amdgpu_vm_bo_idle(struct amdgpu_vm_bo_base *vm_bo)
*/
static void amdgpu_vm_bo_invalidated(struct amdgpu_vm_bo_base *vm_bo)
{
- spin_lock(&vm_bo->vm->status_lock);
+ spin_lock(&vm_bo->vm->invalidated_lock);
list_move(&vm_bo->vm_status, &vm_bo->vm->invalidated);
- spin_unlock(&vm_bo->vm->status_lock);
+ spin_unlock(&vm_bo->vm->invalidated_lock);
}
/**
@@ -218,10 +212,9 @@ static void amdgpu_vm_bo_invalidated(struct amdgpu_vm_bo_base *vm_bo)
*/
static void amdgpu_vm_bo_evicted_user(struct amdgpu_vm_bo_base *vm_bo)
{
+ amdgpu_vm_assert_locked(vm_bo->vm);
vm_bo->moved = true;
- spin_lock(&vm_bo->vm->status_lock);
list_move(&vm_bo->vm_status, &vm_bo->vm->evicted_user);
- spin_unlock(&vm_bo->vm->status_lock);
}
/**
@@ -235,13 +228,10 @@ static void amdgpu_vm_bo_evicted_user(struct amdgpu_vm_bo_base *vm_bo)
static void amdgpu_vm_bo_relocated(struct amdgpu_vm_bo_base *vm_bo)
{
amdgpu_vm_assert_locked(vm_bo->vm);
- if (vm_bo->bo->parent) {
- spin_lock(&vm_bo->vm->status_lock);
+ if (vm_bo->bo->parent)
list_move(&vm_bo->vm_status, &vm_bo->vm->relocated);
- spin_unlock(&vm_bo->vm->status_lock);
- } else {
+ else
amdgpu_vm_bo_idle(vm_bo);
- }
}
/**
@@ -255,9 +245,7 @@ static void amdgpu_vm_bo_relocated(struct amdgpu_vm_bo_base *vm_bo)
static void amdgpu_vm_bo_done(struct amdgpu_vm_bo_base *vm_bo)
{
amdgpu_vm_assert_locked(vm_bo->vm);
- spin_lock(&vm_bo->vm->status_lock);
list_move(&vm_bo->vm_status, &vm_bo->vm->done);
- spin_unlock(&vm_bo->vm->status_lock);
}
/**
@@ -271,13 +259,13 @@ static void amdgpu_vm_bo_reset_state_machine(struct amdgpu_vm *vm)
{
struct amdgpu_vm_bo_base *vm_bo, *tmp;
- amdgpu_vm_assert_locked(vm);
-
- spin_lock(&vm->status_lock);
+ spin_lock(&vm->invalidated_lock);
list_splice_init(&vm->done, &vm->invalidated);
list_for_each_entry(vm_bo, &vm->invalidated, vm_status)
vm_bo->moved = true;
+ spin_unlock(&vm->invalidated_lock);
+ amdgpu_vm_assert_locked(vm);
list_for_each_entry_safe(vm_bo, tmp, &vm->idle, vm_status) {
struct amdgpu_bo *bo = vm_bo->bo;
@@ -287,14 +275,13 @@ static void amdgpu_vm_bo_reset_state_machine(struct amdgpu_vm *vm)
else if (bo->parent)
list_move(&vm_bo->vm_status, &vm_bo->vm->relocated);
}
- spin_unlock(&vm->status_lock);
}
/**
* amdgpu_vm_update_shared - helper to update shared memory stat
* @base: base structure for tracking BO usage in a VM
*
- * Takes the vm status_lock and updates the shared memory stat. If the basic
+ * Takes the vm stats_lock and updates the shared memory stat. If the basic
* stat changed (e.g. buffer was moved) amdgpu_vm_update_stats need to be called
* as well.
*/
@@ -307,7 +294,7 @@ static void amdgpu_vm_update_shared(struct amdgpu_vm_bo_base *base)
bool shared;
dma_resv_assert_held(bo->tbo.base.resv);
- spin_lock(&vm->status_lock);
+ spin_lock(&vm->stats_lock);
shared = drm_gem_object_is_shared_for_memory_stats(&bo->tbo.base);
if (base->shared != shared) {
base->shared = shared;
@@ -319,7 +306,7 @@ static void amdgpu_vm_update_shared(struct amdgpu_vm_bo_base *base)
vm->stats[bo_memtype].drm.private += size;
}
}
- spin_unlock(&vm->status_lock);
+ spin_unlock(&vm->stats_lock);
}
/**
@@ -344,11 +331,11 @@ void amdgpu_vm_bo_update_shared(struct amdgpu_bo *bo)
* be bo->tbo.resource
* @sign: if we should add (+1) or subtract (-1) from the stat
*
- * Caller need to have the vm status_lock held. Useful for when multiple update
+ * Caller need to have the vm stats_lock held. Useful for when multiple update
* need to happen at the same time.
*/
static void amdgpu_vm_update_stats_locked(struct amdgpu_vm_bo_base *base,
- struct ttm_resource *res, int sign)
+ struct ttm_resource *res, int sign)
{
struct amdgpu_vm *vm = base->vm;
struct amdgpu_bo *bo = base->bo;
@@ -372,7 +359,8 @@ static void amdgpu_vm_update_stats_locked(struct amdgpu_vm_bo_base *base,
*/
if (bo->flags & AMDGPU_GEM_CREATE_DISCARDABLE)
vm->stats[res_memtype].drm.purgeable += size;
- if (!(bo->preferred_domains & amdgpu_mem_type_to_domain(res_memtype)))
+ if (!(bo->preferred_domains &
+ amdgpu_mem_type_to_domain(res_memtype)))
vm->stats[bo_memtype].evicted += size;
}
}
@@ -391,9 +379,9 @@ void amdgpu_vm_update_stats(struct amdgpu_vm_bo_base *base,
{
struct amdgpu_vm *vm = base->vm;
- spin_lock(&vm->status_lock);
+ spin_lock(&vm->stats_lock);
amdgpu_vm_update_stats_locked(base, res, sign);
- spin_unlock(&vm->status_lock);
+ spin_unlock(&vm->stats_lock);
}
/**
@@ -419,10 +407,10 @@ void amdgpu_vm_bo_base_init(struct amdgpu_vm_bo_base *base,
base->next = bo->vm_bo;
bo->vm_bo = base;
- spin_lock(&vm->status_lock);
+ spin_lock(&vm->stats_lock);
base->shared = drm_gem_object_is_shared_for_memory_stats(&bo->tbo.base);
amdgpu_vm_update_stats_locked(base, bo->tbo.resource, +1);
- spin_unlock(&vm->status_lock);
+ spin_unlock(&vm->stats_lock);
if (!amdgpu_vm_is_bo_always_valid(vm, bo))
return;
@@ -481,25 +469,25 @@ int amdgpu_vm_lock_done_list(struct amdgpu_vm *vm, struct drm_exec *exec,
int ret;
/* We can only trust prev->next while holding the lock */
- spin_lock(&vm->status_lock);
+ spin_lock(&vm->invalidated_lock);
while (!list_is_head(prev->next, &vm->done)) {
bo_va = list_entry(prev->next, typeof(*bo_va), base.vm_status);
bo = bo_va->base.bo;
if (bo) {
amdgpu_bo_ref(bo);
- spin_unlock(&vm->status_lock);
+ spin_unlock(&vm->invalidated_lock);
ret = drm_exec_prepare_obj(exec, &bo->tbo.base, 1);
amdgpu_bo_unref(&bo);
if (unlikely(ret))
return ret;
- spin_lock(&vm->status_lock);
+ spin_lock(&vm->invalidated_lock);
}
prev = prev->next;
}
- spin_unlock(&vm->status_lock);
+ spin_unlock(&vm->invalidated_lock);
return 0;
}
@@ -595,7 +583,7 @@ int amdgpu_vm_validate(struct amdgpu_device *adev, struct amdgpu_vm *vm,
void *param)
{
uint64_t new_vm_generation = amdgpu_vm_generation(adev, vm);
- struct amdgpu_vm_bo_base *bo_base;
+ struct amdgpu_vm_bo_base *bo_base, *tmp;
struct amdgpu_bo *bo;
int r;
@@ -608,13 +596,7 @@ int amdgpu_vm_validate(struct amdgpu_device *adev, struct amdgpu_vm *vm,
return r;
}
- spin_lock(&vm->status_lock);
- while (!list_empty(&vm->evicted)) {
- bo_base = list_first_entry(&vm->evicted,
- struct amdgpu_vm_bo_base,
- vm_status);
- spin_unlock(&vm->status_lock);
-
+ list_for_each_entry_safe(bo_base, tmp, &vm->evicted, vm_status) {
bo = bo_base->bo;
r = validate(param, bo);
@@ -627,26 +609,21 @@ int amdgpu_vm_validate(struct amdgpu_device *adev, struct amdgpu_vm *vm,
vm->update_funcs->map_table(to_amdgpu_bo_vm(bo));
amdgpu_vm_bo_relocated(bo_base);
}
- spin_lock(&vm->status_lock);
}
- while (ticket && !list_empty(&vm->evicted_user)) {
- bo_base = list_first_entry(&vm->evicted_user,
- struct amdgpu_vm_bo_base,
- vm_status);
- spin_unlock(&vm->status_lock);
- bo = bo_base->bo;
- dma_resv_assert_held(bo->tbo.base.resv);
+ if (ticket) {
+ list_for_each_entry_safe(bo_base, tmp, &vm->evicted_user,
+ vm_status) {
+ bo = bo_base->bo;
+ dma_resv_assert_held(bo->tbo.base.resv);
- r = validate(param, bo);
- if (r)
- return r;
-
- amdgpu_vm_bo_invalidated(bo_base);
+ r = validate(param, bo);
+ if (r)
+ return r;
- spin_lock(&vm->status_lock);
+ amdgpu_vm_bo_invalidated(bo_base);
+ }
}
- spin_unlock(&vm->status_lock);
amdgpu_vm_eviction_lock(vm);
vm->evicting = false;
@@ -675,9 +652,7 @@ bool amdgpu_vm_ready(struct amdgpu_vm *vm)
ret = !vm->evicting;
amdgpu_vm_eviction_unlock(vm);
- spin_lock(&vm->status_lock);
ret &= list_empty(&vm->evicted);
- spin_unlock(&vm->status_lock);
spin_lock(&vm->immediate.lock);
ret &= !vm->immediate.stopped;
@@ -971,18 +946,13 @@ int amdgpu_vm_update_pdes(struct amdgpu_device *adev,
struct amdgpu_vm *vm, bool immediate)
{
struct amdgpu_vm_update_params params;
- struct amdgpu_vm_bo_base *entry;
+ struct amdgpu_vm_bo_base *entry, *tmp;
bool flush_tlb_needed = false;
- LIST_HEAD(relocated);
int r, idx;
amdgpu_vm_assert_locked(vm);
- spin_lock(&vm->status_lock);
- list_splice_init(&vm->relocated, &relocated);
- spin_unlock(&vm->status_lock);
-
- if (list_empty(&relocated))
+ if (list_empty(&vm->relocated))
return 0;
if (!drm_dev_enter(adev_to_drm(adev), &idx))
@@ -998,7 +968,7 @@ int amdgpu_vm_update_pdes(struct amdgpu_device *adev,
if (r)
goto error;
- list_for_each_entry(entry, &relocated, vm_status) {
+ list_for_each_entry(entry, &vm->relocated, vm_status) {
/* vm_flush_needed after updating moved PDEs */
flush_tlb_needed |= entry->moved;
@@ -1014,9 +984,7 @@ int amdgpu_vm_update_pdes(struct amdgpu_device *adev,
if (flush_tlb_needed)
atomic64_inc(&vm->tlb_seq);
- while (!list_empty(&relocated)) {
- entry = list_first_entry(&relocated, struct amdgpu_vm_bo_base,
- vm_status);
+ list_for_each_entry_safe(entry, tmp, &vm->relocated, vm_status) {
amdgpu_vm_bo_idle(entry);
}
@@ -1243,9 +1211,9 @@ error_free:
void amdgpu_vm_get_memory(struct amdgpu_vm *vm,
struct amdgpu_mem_stats stats[__AMDGPU_PL_NUM])
{
- spin_lock(&vm->status_lock);
+ spin_lock(&vm->stats_lock);
memcpy(stats, vm->stats, sizeof(*stats) * __AMDGPU_PL_NUM);
- spin_unlock(&vm->status_lock);
+ spin_unlock(&vm->stats_lock);
}
/**
@@ -1612,29 +1580,24 @@ int amdgpu_vm_handle_moved(struct amdgpu_device *adev,
struct amdgpu_vm *vm,
struct ww_acquire_ctx *ticket)
{
- struct amdgpu_bo_va *bo_va;
+ struct amdgpu_bo_va *bo_va, *tmp;
struct dma_resv *resv;
bool clear, unlock;
int r;
- spin_lock(&vm->status_lock);
- while (!list_empty(&vm->moved)) {
- bo_va = list_first_entry(&vm->moved, struct amdgpu_bo_va,
- base.vm_status);
- spin_unlock(&vm->status_lock);
-
+ list_for_each_entry_safe(bo_va, tmp, &vm->moved, base.vm_status) {
/* Per VM BOs never need to bo cleared in the page tables */
r = amdgpu_vm_bo_update(adev, bo_va, false);
if (r)
return r;
- spin_lock(&vm->status_lock);
}
+ spin_lock(&vm->invalidated_lock);
while (!list_empty(&vm->invalidated)) {
bo_va = list_first_entry(&vm->invalidated, struct amdgpu_bo_va,
base.vm_status);
resv = bo_va->base.bo->tbo.base.resv;
- spin_unlock(&vm->status_lock);
+ spin_unlock(&vm->invalidated_lock);
/* Try to reserve the BO to avoid clearing its ptes */
if (!adev->debug_vm && dma_resv_trylock(resv)) {
@@ -1666,9 +1629,9 @@ int amdgpu_vm_handle_moved(struct amdgpu_device *adev,
bo_va->base.bo->tbo.resource->mem_type == TTM_PL_SYSTEM))
amdgpu_vm_bo_evicted_user(&bo_va->base);
- spin_lock(&vm->status_lock);
+ spin_lock(&vm->invalidated_lock);
}
- spin_unlock(&vm->status_lock);
+ spin_unlock(&vm->invalidated_lock);
return 0;
}
@@ -2211,9 +2174,9 @@ void amdgpu_vm_bo_del(struct amdgpu_device *adev,
}
}
- spin_lock(&vm->status_lock);
+ spin_lock(&vm->invalidated_lock);
list_del(&bo_va->base.vm_status);
- spin_unlock(&vm->status_lock);
+ spin_unlock(&vm->invalidated_lock);
list_for_each_entry_safe(mapping, next, &bo_va->valids, list) {
list_del(&mapping->list);
@@ -2321,10 +2284,10 @@ void amdgpu_vm_bo_move(struct amdgpu_bo *bo, struct ttm_resource *new_mem,
for (bo_base = bo->vm_bo; bo_base; bo_base = bo_base->next) {
struct amdgpu_vm *vm = bo_base->vm;
- spin_lock(&vm->status_lock);
+ spin_lock(&vm->stats_lock);
amdgpu_vm_update_stats_locked(bo_base, bo->tbo.resource, -1);
amdgpu_vm_update_stats_locked(bo_base, new_mem, +1);
- spin_unlock(&vm->status_lock);
+ spin_unlock(&vm->stats_lock);
}
amdgpu_vm_bo_invalidate(bo, evicted);
@@ -2593,11 +2556,12 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
INIT_LIST_HEAD(&vm->relocated);
INIT_LIST_HEAD(&vm->moved);
INIT_LIST_HEAD(&vm->idle);
+ spin_lock_init(&vm->invalidated_lock);
INIT_LIST_HEAD(&vm->invalidated);
- spin_lock_init(&vm->status_lock);
INIT_LIST_HEAD(&vm->freed);
INIT_LIST_HEAD(&vm->done);
INIT_KFIFO(vm->faults);
+ spin_lock_init(&vm->stats_lock);
r = amdgpu_vm_init_entities(adev, vm);
if (r)
@@ -3065,7 +3029,6 @@ void amdgpu_debugfs_vm_bo_info(struct amdgpu_vm *vm, struct seq_file *m)
amdgpu_vm_assert_locked(vm);
- spin_lock(&vm->status_lock);
seq_puts(m, "\tIdle BOs:\n");
list_for_each_entry_safe(bo_va, tmp, &vm->idle, base.vm_status) {
if (!bo_va->base.bo)
@@ -3103,11 +3066,13 @@ void amdgpu_debugfs_vm_bo_info(struct amdgpu_vm *vm, struct seq_file *m)
id = 0;
seq_puts(m, "\tInvalidated BOs:\n");
+ spin_lock(&vm->invalidated_lock);
list_for_each_entry_safe(bo_va, tmp, &vm->invalidated, base.vm_status) {
if (!bo_va->base.bo)
continue;
total_invalidated += amdgpu_bo_print_info(id++, bo_va->base.bo, m);
}
+ spin_unlock(&vm->invalidated_lock);
total_invalidated_objs = id;
id = 0;
@@ -3117,7 +3082,6 @@ void amdgpu_debugfs_vm_bo_info(struct amdgpu_vm *vm, struct seq_file *m)
continue;
total_done += amdgpu_bo_print_info(id++, bo_va->base.bo, m);
}
- spin_unlock(&vm->status_lock);
total_done_objs = id;
seq_printf(m, "\tTotal idle size: %12lld\tobjs:\t%d\n", total_idle,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
index 46628b0e699b..87b0617d4661 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
@@ -205,11 +205,11 @@ struct amdgpu_vm_bo_base {
/* protected by bo being reserved */
struct amdgpu_vm_bo_base *next;
- /* protected by vm status_lock */
+ /* protected by vm reservation and invalidated_lock */
struct list_head vm_status;
/* if the bo is counted as shared in mem stats
- * protected by vm status_lock */
+ * protected by vm BO being reserved */
bool shared;
/* protected by the BO being reserved */
@@ -345,10 +345,8 @@ struct amdgpu_vm {
bool evicting;
unsigned int saved_flags;
- /* Lock to protect vm_bo add/del/move on all lists of vm */
- spinlock_t status_lock;
-
- /* Memory statistics for this vm, protected by status_lock */
+ /* Memory statistics for this vm, protected by stats_lock */
+ spinlock_t stats_lock;
struct amdgpu_mem_stats stats[__AMDGPU_PL_NUM];
/*
@@ -356,6 +354,8 @@ struct amdgpu_vm {
* PDs, PTs or per VM BOs. The state transits are:
*
* evicted -> relocated (PDs, PTs) or moved (per VM BOs) -> idle
+ *
+ * Lists are protected by the root PD dma_resv lock.
*/
/* Per-VM and PT BOs who needs a validation */
@@ -376,7 +376,10 @@ struct amdgpu_vm {
* state transits are:
*
* evicted_user or invalidated -> done
+ *
+ * Lists are protected by the invalidated_lock.
*/
+ spinlock_t invalidated_lock;
/* BOs for user mode queues that need a validation */
struct list_head evicted_user;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c
index 31a437ce9570..7bdd664f0770 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c
@@ -544,9 +544,7 @@ static void amdgpu_vm_pt_free(struct amdgpu_vm_bo_base *entry)
entry->bo->vm_bo = NULL;
ttm_bo_set_bulk_move(&entry->bo->tbo, NULL);
- spin_lock(&entry->vm->status_lock);
list_del(&entry->vm_status);
- spin_unlock(&entry->vm->status_lock);
amdgpu_bo_unref(&entry->bo);
}
@@ -590,7 +588,6 @@ static void amdgpu_vm_pt_add_list(struct amdgpu_vm_update_params *params,
struct amdgpu_vm_pt_cursor seek;
struct amdgpu_vm_bo_base *entry;
- spin_lock(&params->vm->status_lock);
for_each_amdgpu_vm_pt_dfs_safe(params->adev, params->vm, cursor, seek, entry) {
if (entry && entry->bo)
list_move(&entry->vm_status, &params->tlb_flush_waitlist);
@@ -598,7 +595,6 @@ static void amdgpu_vm_pt_add_list(struct amdgpu_vm_update_params *params,
/* enter start node now */
list_move(&cursor->entry->vm_status, &params->tlb_flush_waitlist);
- spin_unlock(&params->vm->status_lock);
}
/**
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgv_sriovmsg.h b/drivers/gpu/drm/amd/amdgpu/amdgv_sriovmsg.h
index a841f342a3eb..847cfd1fd004 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgv_sriovmsg.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgv_sriovmsg.h
@@ -161,7 +161,8 @@ union amd_sriov_msg_feature_flags {
uint32_t ras_telemetry : 1;
uint32_t ras_cper : 1;
uint32_t xgmi_ta_ext_peer_link : 1;
- uint32_t reserved : 19;
+ uint32_t xgmi_connected_to_cpu : 1;
+ uint32_t reserved : 18;
} flags;
uint32_t all;
};
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
index f1052acea5ec..c8f465158e71 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
@@ -1298,7 +1298,7 @@ static void dce_v10_0_audio_write_speaker_allocation(struct drm_encoder *encoder
return;
}
- sad_count = drm_edid_to_speaker_allocation(amdgpu_connector->edid, &sadb);
+ sad_count = drm_edid_to_speaker_allocation(drm_edid_raw(amdgpu_connector->edid), &sadb);
if (sad_count < 0) {
DRM_ERROR("Couldn't read Speaker Allocation Data Block: %d\n", sad_count);
sad_count = 0;
@@ -1368,7 +1368,7 @@ static void dce_v10_0_audio_write_sad_regs(struct drm_encoder *encoder)
return;
}
- sad_count = drm_edid_to_sad(amdgpu_connector->edid, &sads);
+ sad_count = drm_edid_to_sad(drm_edid_raw(amdgpu_connector->edid), &sads);
if (sad_count < 0)
DRM_ERROR("Couldn't read SADs: %d\n", sad_count);
if (sad_count <= 0)
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
index c153a6e1e22a..58d0da5c2a74 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
@@ -1265,7 +1265,7 @@ static void dce_v6_0_audio_write_speaker_allocation(struct drm_encoder *encoder)
return;
}
- sad_count = drm_edid_to_speaker_allocation(amdgpu_connector->edid, &sadb);
+ sad_count = drm_edid_to_speaker_allocation(drm_edid_raw(amdgpu_connector->edid), &sadb);
if (sad_count < 0) {
DRM_ERROR("Couldn't read Speaker Allocation Data Block: %d\n", sad_count);
sad_count = 0;
@@ -1346,7 +1346,7 @@ static void dce_v6_0_audio_write_sad_regs(struct drm_encoder *encoder)
return;
}
- sad_count = drm_edid_to_sad(amdgpu_connector->edid, &sads);
+ sad_count = drm_edid_to_sad(drm_edid_raw(amdgpu_connector->edid), &sads);
if (sad_count < 0)
DRM_ERROR("Couldn't read SADs: %d\n", sad_count);
if (sad_count <= 0)
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
index a85a9e32fde4..6d19f6d94d25 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
@@ -1271,7 +1271,7 @@ static void dce_v8_0_audio_write_speaker_allocation(struct drm_encoder *encoder)
return;
}
- sad_count = drm_edid_to_speaker_allocation(amdgpu_connector->edid, &sadb);
+ sad_count = drm_edid_to_speaker_allocation(drm_edid_raw(amdgpu_connector->edid), &sadb);
if (sad_count < 0) {
DRM_ERROR("Couldn't read Speaker Allocation Data Block: %d\n", sad_count);
sad_count = 0;
@@ -1339,7 +1339,7 @@ static void dce_v8_0_audio_write_sad_regs(struct drm_encoder *encoder)
return;
}
- sad_count = drm_edid_to_sad(amdgpu_connector->edid, &sads);
+ sad_count = drm_edid_to_sad(drm_edid_raw(amdgpu_connector->edid), &sads);
if (sad_count < 0)
DRM_ERROR("Couldn't read SADs: %d\n", sad_count);
if (sad_count <= 0)
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v12_1.c b/drivers/gpu/drm/amd/amdgpu/gfx_v12_1.c
index 557d15b90ad2..4e02b62cdbb3 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v12_1.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v12_1.c
@@ -1155,11 +1155,13 @@ static int gfx_v12_1_sw_init(struct amdgpu_ip_block *ip_block)
break;
}
- /* recalculate compute rings to use based on hardware configuration */
- num_compute_rings = (adev->gfx.mec.num_pipe_per_mec *
- adev->gfx.mec.num_queue_per_pipe) / 2;
- adev->gfx.num_compute_rings = min(adev->gfx.num_compute_rings,
- num_compute_rings);
+ if (adev->gfx.num_compute_rings) {
+ /* recalculate compute rings to use based on hardware configuration */
+ num_compute_rings = (adev->gfx.mec.num_pipe_per_mec *
+ adev->gfx.mec.num_queue_per_pipe) / 2;
+ adev->gfx.num_compute_rings = min(adev->gfx.num_compute_rings,
+ num_compute_rings);
+ }
num_xcc = NUM_XCC(adev->gfx.xcc_mask);
@@ -2794,6 +2796,33 @@ static void gfx_v12_1_xcc_fini(struct amdgpu_device *adev,
gfx_v12_1_xcc_enable_gui_idle_interrupt(adev, false, xcc_id);
}
+static int gfx_v12_1_set_userq_eop_interrupts(struct amdgpu_device *adev,
+ bool enable)
+{
+ unsigned int irq_type;
+ int m, p, r;
+
+ if (adev->gfx.disable_kq) {
+ for (m = 0; m < adev->gfx.mec.num_mec; ++m) {
+ for (p = 0; p < adev->gfx.mec.num_pipe_per_mec; p++) {
+ irq_type = AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP
+ + (m * adev->gfx.mec.num_pipe_per_mec)
+ + p;
+ if (enable)
+ r = amdgpu_irq_get(adev, &adev->gfx.eop_irq,
+ irq_type);
+ else
+ r = amdgpu_irq_put(adev, &adev->gfx.eop_irq,
+ irq_type);
+ if (r)
+ return r;
+ }
+ }
+ }
+
+ return 0;
+}
+
static int gfx_v12_1_hw_fini(struct amdgpu_ip_block *ip_block)
{
struct amdgpu_device *adev = ip_block->adev;
@@ -2801,6 +2830,7 @@ static int gfx_v12_1_hw_fini(struct amdgpu_ip_block *ip_block)
amdgpu_irq_put(adev, &adev->gfx.priv_reg_irq, 0);
amdgpu_irq_put(adev, &adev->gfx.priv_inst_irq, 0);
+ gfx_v12_1_set_userq_eop_interrupts(adev, false);
num_xcc = NUM_XCC(adev->gfx.xcc_mask);
for (i = 0; i < num_xcc; i++) {
@@ -2868,10 +2898,26 @@ static int gfx_v12_1_early_init(struct amdgpu_ip_block *ip_block)
{
struct amdgpu_device *adev = ip_block->adev;
+
+ switch (amdgpu_user_queue) {
+ case -1:
+ default:
+ adev->gfx.disable_kq = true;
+ adev->gfx.disable_uq = true;
+ break;
+ case 0:
+ adev->gfx.disable_kq = false;
+ adev->gfx.disable_uq = true;
+ break;
+ }
+
adev->gfx.funcs = &gfx_v12_1_gfx_funcs;
- adev->gfx.num_compute_rings = min(amdgpu_gfx_get_num_kcq(adev),
- AMDGPU_MAX_COMPUTE_RINGS);
+ if (adev->gfx.disable_kq)
+ adev->gfx.num_compute_rings = 0;
+ else
+ adev->gfx.num_compute_rings = min(amdgpu_gfx_get_num_kcq(adev),
+ AMDGPU_MAX_COMPUTE_RINGS);
gfx_v12_1_set_kiq_pm4_funcs(adev);
gfx_v12_1_set_ring_funcs(adev);
@@ -2898,6 +2944,10 @@ static int gfx_v12_1_late_init(struct amdgpu_ip_block *ip_block)
if (r)
return r;
+ r = gfx_v12_1_set_userq_eop_interrupts(adev, true);
+ if (r)
+ return r;
+
return 0;
}
@@ -3630,12 +3680,6 @@ static int gfx_v12_1_eop_irq(struct amdgpu_device *adev,
return -EINVAL;
switch (me_id) {
- case 0:
- if (pipe_id == 0)
- amdgpu_fence_process(&adev->gfx.gfx_ring[0]);
- else
- amdgpu_fence_process(&adev->gfx.gfx_ring[1]);
- break;
case 1:
case 2:
for (i = 0; i < adev->gfx.num_compute_rings; i++) {
@@ -3652,6 +3696,9 @@ static int gfx_v12_1_eop_irq(struct amdgpu_device *adev,
amdgpu_fence_process(ring);
}
break;
+ default:
+ dev_dbg(adev->dev, "Unexpected me %d in eop_irq\n", me_id);
+ break;
}
}
@@ -3719,29 +3766,23 @@ static void gfx_v12_1_handle_priv_fault(struct amdgpu_device *adev,
if (xcc_id == -EINVAL)
return;
- switch (me_id) {
- case 0:
- for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
- ring = &adev->gfx.gfx_ring[i];
- /* we only enabled 1 gfx queue per pipe for now */
- if (ring->me == me_id && ring->pipe == pipe_id)
- drm_sched_fault(&ring->sched);
- }
- break;
- case 1:
- case 2:
- for (i = 0; i < adev->gfx.num_compute_rings; i++) {
- ring = &adev->gfx.compute_ring
+ if (!adev->gfx.disable_kq) {
+ switch (me_id) {
+ case 1:
+ case 2:
+ for (i = 0; i < adev->gfx.num_compute_rings; i++) {
+ ring = &adev->gfx.compute_ring
[i +
xcc_id * adev->gfx.num_compute_rings];
- if (ring->me == me_id && ring->pipe == pipe_id &&
- ring->queue == queue_id)
- drm_sched_fault(&ring->sched);
+ if (ring->me == me_id && ring->pipe == pipe_id &&
+ ring->queue == queue_id)
+ drm_sched_fault(&ring->sched);
+ }
+ break;
+ default:
+ dev_dbg(adev->dev, "Unexpected me %d in priv_fault\n", me_id);
+ break;
}
- break;
- default:
- BUG();
- break;
}
}
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
index 7e9d753f4a80..95be105671ec 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
@@ -2355,7 +2355,7 @@ static int gfx_v9_0_sw_init(struct amdgpu_ip_block *ip_block)
for (i = 0; i < GFX9_NUM_SW_GFX_RINGS; i++) {
ring = &adev->gfx.sw_gfx_ring[i];
ring->ring_obj = NULL;
- sprintf(ring->name, amdgpu_sw_ring_name(i));
+ strscpy(ring->name, amdgpu_sw_ring_name(i), sizeof(ring->name));
ring->use_doorbell = true;
ring->doorbell_index = adev->doorbell_index.gfx_ring0 << 1;
ring->is_sw_ring = true;
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v12_1.c b/drivers/gpu/drm/amd/amdgpu/gmc_v12_1.c
index dc8865c5879c..c49112d8300e 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v12_1.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v12_1.c
@@ -121,7 +121,7 @@ static int gmc_v12_1_process_interrupt(struct amdgpu_device *adev,
if (entry->src_id == UTCL2_1_0__SRCID__RETRY) {
retry_fault = true;
- write_fault = !!(entry->src_data[1] & 0x200000);
+ write_fault = !!(entry->src_data[1] & AMDGPU_GMC121_FAULT_SOURCE_DATA_WRITE);
}
if (entry->client_id == SOC_V1_0_IH_CLIENTID_VMC) {
diff --git a/drivers/gpu/drm/amd/amdgpu/ih_v7_0.c b/drivers/gpu/drm/amd/amdgpu/ih_v7_0.c
index 451828bf583e..1fbe904f4223 100644
--- a/drivers/gpu/drm/amd/amdgpu/ih_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/ih_v7_0.c
@@ -289,6 +289,13 @@ static uint32_t ih_v7_0_setup_retry_doorbell(u32 doorbell_index)
return val;
}
+#define regIH_RING1_CLIENT_CFG_INDEX_V7_1 0x122
+#define regIH_RING1_CLIENT_CFG_INDEX_V7_1_BASE_IDX 0
+#define regIH_RING1_CLIENT_CFG_DATA_V7_1 0x123
+#define regIH_RING1_CLIENT_CFG_DATA_V7_1_BASE_IDX 0
+#define regIH_CHICKEN_V7_1 0x129
+#define regIH_CHICKEN_V7_1_BASE_IDX 0
+
/**
* ih_v7_0_irq_init - init and enable the interrupt ring
*
@@ -307,6 +314,7 @@ static int ih_v7_0_irq_init(struct amdgpu_device *adev)
u32 tmp;
int ret;
int i;
+ u32 reg_addr;
/* disable irqs */
ret = ih_v7_0_toggle_interrupts(adev, false);
@@ -318,10 +326,15 @@ static int ih_v7_0_irq_init(struct amdgpu_device *adev)
if (unlikely((adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT) ||
(adev->firmware.load_type == AMDGPU_FW_LOAD_RLC_BACKDOOR_AUTO))) {
if (ih[0]->use_bus_addr) {
- ih_chicken = RREG32_SOC15(OSSSYS, 0, regIH_CHICKEN);
+ if (amdgpu_ip_version(adev, OSSSYS_HWIP, 0) == IP_VERSION(7, 1, 0))
+ reg_addr = SOC15_REG_OFFSET(OSSSYS, 0, regIH_CHICKEN_V7_1);
+ else
+ reg_addr = SOC15_REG_OFFSET(OSSSYS, 0, regIH_CHICKEN);
+ ih_chicken = RREG32(reg_addr);
+ /* The reg fields definitions are identical in ih v7_0 and ih v7_1 */
ih_chicken = REG_SET_FIELD(ih_chicken,
IH_CHICKEN, MC_SPACE_GPA_ENABLE, 1);
- WREG32_SOC15(OSSSYS, 0, regIH_CHICKEN, ih_chicken);
+ WREG32(reg_addr, ih_chicken);
}
}
@@ -358,17 +371,26 @@ static int ih_v7_0_irq_init(struct amdgpu_device *adev)
/* Redirect the interrupts to IH RB1 for dGPU */
if (adev->irq.ih1.ring_size) {
- tmp = RREG32_SOC15(OSSSYS, 0, regIH_RING1_CLIENT_CFG_INDEX);
+ if (amdgpu_ip_version(adev, OSSSYS_HWIP, 0) == IP_VERSION(7, 1, 0))
+ reg_addr = SOC15_REG_OFFSET(OSSSYS, 0, regIH_RING1_CLIENT_CFG_INDEX_V7_1);
+ else
+ reg_addr = SOC15_REG_OFFSET(OSSSYS, 0, regIH_RING1_CLIENT_CFG_INDEX);
+ tmp = RREG32(reg_addr);
+ /* The reg fields definitions are identical in ih v7_0 and ih v7_1 */
tmp = REG_SET_FIELD(tmp, IH_RING1_CLIENT_CFG_INDEX, INDEX, 0);
- WREG32_SOC15(OSSSYS, 0, regIH_RING1_CLIENT_CFG_INDEX, tmp);
+ WREG32(reg_addr, tmp);
- tmp = RREG32_SOC15(OSSSYS, 0, regIH_RING1_CLIENT_CFG_DATA);
+ if (amdgpu_ip_version(adev, OSSSYS_HWIP, 0) == IP_VERSION(7, 1, 0))
+ reg_addr = SOC15_REG_OFFSET(OSSSYS, 0, regIH_RING1_CLIENT_CFG_DATA_V7_1);
+ else
+ reg_addr = SOC15_REG_OFFSET(OSSSYS, 0, regIH_RING1_CLIENT_CFG_DATA);
+ tmp = RREG32(reg_addr);
+ /* The reg fields definitions are identical in ih v7_0 and ih v7_1 */
tmp = REG_SET_FIELD(tmp, IH_RING1_CLIENT_CFG_DATA, CLIENT_ID, 0xa);
tmp = REG_SET_FIELD(tmp, IH_RING1_CLIENT_CFG_DATA, SOURCE_ID, 0x0);
tmp = REG_SET_FIELD(tmp, IH_RING1_CLIENT_CFG_DATA,
SOURCE_ID_MATCH_ENABLE, 0x1);
-
- WREG32_SOC15(OSSSYS, 0, regIH_RING1_CLIENT_CFG_DATA, tmp);
+ WREG32(reg_addr, tmp);
}
pci_set_master(adev->pdev);
diff --git a/drivers/gpu/drm/amd/amdgpu/lsdma_v7_1.c b/drivers/gpu/drm/amd/amdgpu/lsdma_v7_1.c
new file mode 100644
index 000000000000..d93a0e65ce7d
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/lsdma_v7_1.c
@@ -0,0 +1,99 @@
+/*
+ * Copyright 2026 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include <linux/delay.h>
+#include "amdgpu.h"
+#include "lsdma_v7_1.h"
+#include "amdgpu_lsdma.h"
+
+#include "lsdma/lsdma_7_1_0_offset.h"
+#include "lsdma/lsdma_7_1_0_sh_mask.h"
+
+static int lsdma_v7_1_wait_pio_status(struct amdgpu_device *adev)
+{
+ return amdgpu_lsdma_wait_for(adev, SOC15_REG_OFFSET(LSDMA, 0, regLSDMA_PIO_STATUS),
+ LSDMA_PIO_STATUS__PIO_IDLE_MASK | LSDMA_PIO_STATUS__PIO_FIFO_EMPTY_MASK,
+ LSDMA_PIO_STATUS__PIO_IDLE_MASK | LSDMA_PIO_STATUS__PIO_FIFO_EMPTY_MASK);
+}
+
+static int lsdma_v7_1_copy_mem(struct amdgpu_device *adev,
+ uint64_t src_addr,
+ uint64_t dst_addr,
+ uint64_t size)
+{
+ int ret;
+ uint32_t tmp;
+
+ WREG32_SOC15(LSDMA, 0, regLSDMA_PIO_SRC_ADDR_LO, lower_32_bits(src_addr));
+ WREG32_SOC15(LSDMA, 0, regLSDMA_PIO_SRC_ADDR_HI, upper_32_bits(src_addr));
+
+ WREG32_SOC15(LSDMA, 0, regLSDMA_PIO_DST_ADDR_LO, lower_32_bits(dst_addr));
+ WREG32_SOC15(LSDMA, 0, regLSDMA_PIO_DST_ADDR_HI, upper_32_bits(dst_addr));
+
+ WREG32_SOC15(LSDMA, 0, regLSDMA_PIO_CONTROL, 0x0);
+
+ tmp = RREG32_SOC15(LSDMA, 0, regLSDMA_PIO_COMMAND);
+ tmp = REG_SET_FIELD(tmp, LSDMA_PIO_COMMAND, COUNT, size);
+ tmp = REG_SET_FIELD(tmp, LSDMA_PIO_COMMAND, RAW_WAIT, 0);
+ tmp = REG_SET_FIELD(tmp, LSDMA_PIO_COMMAND, CONSTANT_FILL, 0);
+ WREG32_SOC15(LSDMA, 0, regLSDMA_PIO_COMMAND, tmp);
+
+ ret = lsdma_v7_1_wait_pio_status(adev);
+ if (ret)
+ dev_err(adev->dev, "LSDMA PIO failed to copy memory!\n");
+
+ return ret;
+}
+
+static int lsdma_v7_1_fill_mem(struct amdgpu_device *adev,
+ uint64_t dst_addr,
+ uint32_t data,
+ uint64_t size)
+{
+ int ret;
+ uint32_t tmp;
+
+ WREG32_SOC15(LSDMA, 0, regLSDMA_PIO_CONSTFILL_DATA, data);
+
+ WREG32_SOC15(LSDMA, 0, regLSDMA_PIO_DST_ADDR_LO, lower_32_bits(dst_addr));
+ WREG32_SOC15(LSDMA, 0, regLSDMA_PIO_DST_ADDR_HI, upper_32_bits(dst_addr));
+
+ WREG32_SOC15(LSDMA, 0, regLSDMA_PIO_CONTROL, 0x0);
+
+ tmp = RREG32_SOC15(LSDMA, 0, regLSDMA_PIO_COMMAND);
+ tmp = REG_SET_FIELD(tmp, LSDMA_PIO_COMMAND, COUNT, size);
+ tmp = REG_SET_FIELD(tmp, LSDMA_PIO_COMMAND, RAW_WAIT, 0);
+ tmp = REG_SET_FIELD(tmp, LSDMA_PIO_COMMAND, CONSTANT_FILL, 1);
+ WREG32_SOC15(LSDMA, 0, regLSDMA_PIO_COMMAND, tmp);
+
+ ret = lsdma_v7_1_wait_pio_status(adev);
+ if (ret)
+ dev_err(adev->dev, "LSDMA PIO failed to fill memory!\n");
+
+ return ret;
+}
+
+const struct amdgpu_lsdma_funcs lsdma_v7_1_funcs = {
+ .copy_mem = lsdma_v7_1_copy_mem,
+ .fill_mem = lsdma_v7_1_fill_mem,
+};
diff --git a/drivers/gpu/drm/amd/amdgpu/lsdma_v7_1.h b/drivers/gpu/drm/amd/amdgpu/lsdma_v7_1.h
new file mode 100644
index 000000000000..3d1ab605849a
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/lsdma_v7_1.h
@@ -0,0 +1,31 @@
+/*
+ * Copyright 2026 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __LSDMA_V7_1_H__
+#define __LSDMA_V7_1_H__
+
+#include "soc15_common.h"
+
+extern const struct amdgpu_lsdma_funcs lsdma_v7_1_funcs;
+
+#endif /* __LSDMA_V7_1_H__ */
diff --git a/drivers/gpu/drm/amd/amdgpu/mes_v12_0.c b/drivers/gpu/drm/amd/amdgpu/mes_v12_0.c
index 5bfa5d1d0b36..023c7345ea54 100644
--- a/drivers/gpu/drm/amd/amdgpu/mes_v12_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/mes_v12_0.c
@@ -731,6 +731,9 @@ static int mes_v12_0_set_hw_resources(struct amdgpu_mes *mes, int pipe)
int i;
struct amdgpu_device *adev = mes->adev;
union MESAPI_SET_HW_RESOURCES mes_set_hw_res_pkt;
+ uint32_t mes_rev = (pipe == AMDGPU_MES_SCHED_PIPE) ?
+ (mes->sched_version & AMDGPU_MES_VERSION_MASK) :
+ (mes->kiq_version & AMDGPU_MES_VERSION_MASK);
memset(&mes_set_hw_res_pkt, 0, sizeof(mes_set_hw_res_pkt));
@@ -785,7 +788,7 @@ static int mes_v12_0_set_hw_resources(struct amdgpu_mes *mes, int pipe)
* handling support, other queue will not use the oversubscribe timer.
* handling mode - 0: disabled; 1: basic version; 2: basic+ version
*/
- mes_set_hw_res_pkt.oversubscription_timer = 50;
+ mes_set_hw_res_pkt.oversubscription_timer = mes_rev < 0x8b ? 0 : 50;
mes_set_hw_res_pkt.unmapped_doorbell_handling = 1;
if (amdgpu_mes_log_enable) {
diff --git a/drivers/gpu/drm/amd/amdgpu/mes_v12_1.c b/drivers/gpu/drm/amd/amdgpu/mes_v12_1.c
index 7b8c670d0a9e..d8e4b52bdfd5 100644
--- a/drivers/gpu/drm/amd/amdgpu/mes_v12_1.c
+++ b/drivers/gpu/drm/amd/amdgpu/mes_v12_1.c
@@ -1611,7 +1611,6 @@ static int mes_v12_1_sw_fini(struct amdgpu_ip_block *ip_block)
amdgpu_bo_free_kernel(&adev->mes.eop_gpu_obj[inst],
&adev->mes.eop_gpu_addr[inst],
NULL);
- amdgpu_ucode_release(&adev->mes.fw[inst]);
if (adev->enable_uni_mes || pipe == AMDGPU_MES_SCHED_PIPE) {
amdgpu_bo_free_kernel(&adev->mes.ring[inst].mqd_obj,
@@ -1622,6 +1621,9 @@ static int mes_v12_1_sw_fini(struct amdgpu_ip_block *ip_block)
}
}
+ for (pipe = 0; pipe < AMDGPU_MAX_MES_PIPES; pipe++)
+ amdgpu_ucode_release(&adev->mes.fw[pipe]);
+
for (xcc_id = 0; xcc_id < num_xcc; xcc_id++) {
if (!adev->enable_uni_mes) {
amdgpu_bo_free_kernel(&adev->gfx.kiq[xcc_id].ring.mqd_obj,
diff --git a/drivers/gpu/drm/amd/amdgpu/soc24.c b/drivers/gpu/drm/amd/amdgpu/soc24.c
index 308f32daa780..d1adf19a51c4 100644
--- a/drivers/gpu/drm/amd/amdgpu/soc24.c
+++ b/drivers/gpu/drm/amd/amdgpu/soc24.c
@@ -478,7 +478,7 @@ static int soc24_common_hw_init(struct amdgpu_ip_block *ip_block)
if (adev->nbio.funcs->remap_hdp_registers)
adev->nbio.funcs->remap_hdp_registers(adev);
- if (adev->df.funcs->hw_init)
+ if (adev->df.funcs && adev->df.funcs->hw_init)
adev->df.funcs->hw_init(adev);
/* enable the doorbell aperture */
diff --git a/drivers/gpu/drm/amd/amdgpu/soc_v1_0.c b/drivers/gpu/drm/amd/amdgpu/soc_v1_0.c
index 26e7566a5479..0c7759b82fa6 100644
--- a/drivers/gpu/drm/amd/amdgpu/soc_v1_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/soc_v1_0.c
@@ -57,7 +57,7 @@ static void soc_v1_0_doorbell_index_init(struct amdgpu_device *adev)
adev->doorbell_index.userqueue_end = AMDGPU_SOC_V1_0_DOORBELL_USERQUEUE_END;
adev->doorbell_index.xcc_doorbell_range = AMDGPU_SOC_V1_0_DOORBELL_XCC_RANGE;
- adev->doorbell_index.sdma_doorbell_range = 20;
+ adev->doorbell_index.sdma_doorbell_range = 14;
for (i = 0; i < adev->sdma.num_instances; i++)
adev->doorbell_index.sdma_engine[i] =
AMDGPU_SOC_V1_0_DOORBELL_sDMA_ENGINE_START +
@@ -214,23 +214,35 @@ static bool soc_v1_0_need_full_reset(struct amdgpu_device *adev)
static bool soc_v1_0_need_reset_on_init(struct amdgpu_device *adev)
{
- u32 sol_reg;
- if (adev->flags & AMD_IS_APU)
- return false;
+ return false;
+}
- /* Check sOS sign of life register to confirm sys driver and sOS
- * are already been loaded.
- */
- sol_reg = RREG32_SOC15(MP0, 0, regMPASP_SMN_C2PMSG_81);
- if (sol_reg)
- return true;
+static enum amd_reset_method
+soc_v1_0_asic_reset_method(struct amdgpu_device *adev)
+{
+ if ((adev->gmc.xgmi.supported && adev->gmc.xgmi.connected_to_cpu) ||
+ (amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(15, 0, 8))) {
+ if (amdgpu_reset_method != -1)
+ dev_warn_once(adev->dev, "Reset override isn't supported, using Mode2 instead.\n");
- return false;
+ return AMD_RESET_METHOD_MODE2;
+ }
+
+ return amdgpu_reset_method;
}
static int soc_v1_0_asic_reset(struct amdgpu_device *adev)
{
+ switch (soc_v1_0_asic_reset_method(adev)) {
+ case AMD_RESET_METHOD_MODE2:
+ dev_info(adev->dev, "MODE2 reset\n");
+ return amdgpu_dpm_mode2_reset(adev);
+ default:
+ dev_info(adev->dev, "Invalid reset method Not supported\n");
+ return -EOPNOTSUPP;
+ }
+
return 0;
}
@@ -244,6 +256,7 @@ static const struct amdgpu_asic_funcs soc_v1_0_asic_funcs = {
.need_reset_on_init = &soc_v1_0_need_reset_on_init,
.encode_ext_smn_addressing = &soc_v1_0_encode_ext_smn_addressing,
.reset = soc_v1_0_asic_reset,
+ .reset_method = &soc_v1_0_asic_reset_method,
};
static int soc_v1_0_common_early_init(struct amdgpu_ip_block *ip_block)
@@ -268,7 +281,8 @@ static int soc_v1_0_common_early_init(struct amdgpu_ip_block *ip_block)
switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
case IP_VERSION(12, 1, 0):
- adev->cg_flags = 0;
+ adev->cg_flags = AMD_CG_SUPPORT_GFX_CGCG |
+ AMD_CG_SUPPORT_GFX_CGLS;
adev->pg_flags = 0;
adev->external_rev_id = adev->rev_id + 0x50;
break;
@@ -809,7 +823,7 @@ int soc_v1_0_init_soc_config(struct amdgpu_device *adev)
{
int ret, i;
int xcc_inst_per_aid = 4;
- uint16_t xcc_mask;
+ uint16_t xcc_mask, sdma_mask = 0;
xcc_mask = adev->gfx.xcc_mask;
adev->aid_mask = 0;
@@ -819,10 +833,12 @@ int soc_v1_0_init_soc_config(struct amdgpu_device *adev)
}
adev->sdma.num_inst_per_xcc = 2;
- adev->sdma.num_instances =
- NUM_XCC(adev->gfx.xcc_mask) * adev->sdma.num_inst_per_xcc;
- adev->sdma.sdma_mask =
- GENMASK(adev->sdma.num_instances - 1, 0);
+ for_each_inst(i, adev->gfx.xcc_mask)
+ sdma_mask |=
+ GENMASK(adev->sdma.num_inst_per_xcc - 1, 0) <<
+ (i * adev->sdma.num_inst_per_xcc);
+ adev->sdma.sdma_mask = sdma_mask;
+ adev->sdma.num_instances = NUM_XCC(adev->sdma.sdma_mask);
ret = soc_v1_0_xcp_mgr_init(adev);
if (ret)
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
index 3ddf06c755b5..ab3b2e7be9bd 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
@@ -2720,7 +2720,7 @@ static int get_wave_state(struct device_queue_manager *dqm,
ctl_stack, ctl_stack_used_size, save_area_used_size);
}
-static void get_queue_checkpoint_info(struct device_queue_manager *dqm,
+static int get_queue_checkpoint_info(struct device_queue_manager *dqm,
const struct queue *q,
u32 *mqd_size,
u32 *ctl_stack_size)
@@ -2728,6 +2728,7 @@ static void get_queue_checkpoint_info(struct device_queue_manager *dqm,
struct mqd_manager *mqd_mgr;
enum KFD_MQD_TYPE mqd_type =
get_mqd_type_from_queue_type(q->properties.type);
+ int ret = 0;
dqm_lock(dqm);
mqd_mgr = dqm->mqd_mgrs[mqd_type];
@@ -2735,9 +2736,11 @@ static void get_queue_checkpoint_info(struct device_queue_manager *dqm,
*ctl_stack_size = 0;
if (q->properties.type == KFD_QUEUE_TYPE_COMPUTE && mqd_mgr->get_checkpoint_info)
- mqd_mgr->get_checkpoint_info(mqd_mgr, q->mqd, ctl_stack_size);
+ ret = mqd_mgr->get_checkpoint_info(mqd_mgr, q->mqd, ctl_stack_size);
dqm_unlock(dqm);
+
+ return ret;
}
static int checkpoint_mqd(struct device_queue_manager *dqm,
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h
index ef07e44916f8..3272328da11f 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h
@@ -192,7 +192,7 @@ struct device_queue_manager_ops {
int (*reset_queues)(struct device_queue_manager *dqm,
uint16_t pasid);
- void (*get_queue_checkpoint_info)(struct device_queue_manager *dqm,
+ int (*get_queue_checkpoint_info)(struct device_queue_manager *dqm,
const struct queue *q, u32 *mqd_size,
u32 *ctl_stack_size);
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.h b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.h
index 2429d278ef0e..06ca6235ff1b 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.h
@@ -102,7 +102,8 @@ struct mqd_manager {
u32 *ctl_stack_used_size,
u32 *save_area_used_size);
- void (*get_checkpoint_info)(struct mqd_manager *mm, void *mqd, uint32_t *ctl_stack_size);
+ int (*get_checkpoint_info)(struct mqd_manager *mm, void *mqd,
+ uint32_t *ctl_stack_size);
void (*checkpoint_mqd)(struct mqd_manager *mm,
void *mqd,
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c
index 19f21932a5ce..979ae94ac966 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c
@@ -385,11 +385,14 @@ static int get_wave_state(struct mqd_manager *mm, void *mqd,
return 0;
}
-static void get_checkpoint_info(struct mqd_manager *mm, void *mqd, u32 *ctl_stack_size)
+static int get_checkpoint_info(struct mqd_manager *mm, void *mqd, u32 *ctl_stack_size)
{
struct v9_mqd *m = get_mqd(mqd);
- *ctl_stack_size = m->cp_hqd_cntl_stack_size * NUM_XCC(mm->dev->xcc_mask);
+ if (check_mul_overflow(m->cp_hqd_cntl_stack_size, NUM_XCC(mm->dev->xcc_mask), ctl_stack_size))
+ return -EINVAL;
+
+ return 0;
}
static void checkpoint_mqd(struct mqd_manager *mm, void *mqd, void *mqd_dst, void *ctl_stack_dst)
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c
index f02ef2d44a07..431a20323146 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c
@@ -274,10 +274,11 @@ static int get_wave_state(struct mqd_manager *mm, void *mqd,
return 0;
}
-static void get_checkpoint_info(struct mqd_manager *mm, void *mqd, u32 *ctl_stack_size)
+static int get_checkpoint_info(struct mqd_manager *mm, void *mqd, u32 *ctl_stack_size)
{
/* Control stack is stored in user mode */
*ctl_stack_size = 0;
+ return 0;
}
static void checkpoint_mqd(struct mqd_manager *mm, void *mqd, void *mqd_dst, void *ctl_stack_dst)
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process.c b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
index 12e24fbf8c46..a031166f270c 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_process.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
@@ -585,7 +585,7 @@ static void kfd_procfs_add_sysfs_stats(struct kfd_process *p)
ret = kobject_init_and_add(pdd->kobj_stats,
&procfs_stats_type,
p->kobj,
- stats_dir_filename);
+ "%s", stats_dir_filename);
if (ret) {
pr_warn("Creating KFD proc/stats_%s folder failed",
@@ -632,7 +632,7 @@ static void kfd_procfs_add_sysfs_counters(struct kfd_process *p)
return;
ret = kobject_init_and_add(kobj_counters, &sysfs_counters_type,
- p->kobj, counters_dir_filename);
+ p->kobj, "%s", counters_dir_filename);
if (ret) {
pr_warn("Creating KFD proc/%s folder failed",
counters_dir_filename);
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
index 8ea31699d38b..cc2621ae12f9 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
@@ -593,6 +593,7 @@ int pqm_update_queue_properties(struct process_queue_manager *pqm,
p->queue_size)) {
pr_debug("ring buf 0x%llx size 0x%llx not mapped on GPU\n",
p->queue_address, p->queue_size);
+ amdgpu_bo_unreserve(vm->root.bo);
return -EFAULT;
}
@@ -1069,6 +1070,7 @@ int pqm_get_queue_checkpoint_info(struct process_queue_manager *pqm,
uint32_t *ctl_stack_size)
{
struct process_queue_node *pqn;
+ int ret;
pqn = get_queue_by_qid(pqm, qid);
if (!pqn) {
@@ -1081,9 +1083,14 @@ int pqm_get_queue_checkpoint_info(struct process_queue_manager *pqm,
return -EOPNOTSUPP;
}
- pqn->q->device->dqm->ops.get_queue_checkpoint_info(pqn->q->device->dqm,
+ ret = pqn->q->device->dqm->ops.get_queue_checkpoint_info(pqn->q->device->dqm,
pqn->q, mqd_size,
ctl_stack_size);
+ if (ret) {
+ pr_debug("amdkfd: Overflow while computing stack size for queue %d\n", qid);
+ return ret;
+ }
+
return 0;
}
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index dfe95c9b8746..65b256a7b6c4 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -2473,6 +2473,8 @@ static int dm_dmub_sw_init(struct amdgpu_device *adev)
DMUB_WINDOW_MEMORY_TYPE_FB, //DMUB_WINDOW_7_SCRATCH_MEM
DMUB_WINDOW_MEMORY_TYPE_FB, //DMUB_WINDOW_IB_MEM
DMUB_WINDOW_MEMORY_TYPE_FB, //DMUB_WINDOW_SHARED_STATE
+ DMUB_WINDOW_MEMORY_TYPE_FB, //DMUB_WINDOW_LSDMA_BUFFER
+ DMUB_WINDOW_MEMORY_TYPE_FB, //DMUB_WINDOW_CURSOR_OFFLOAD
};
int r;
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c
index f8c21da62819..d8c69fc94abb 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c
@@ -62,6 +62,9 @@ static const uint32_t rgb_formats[] = {
DRM_FORMAT_XBGR8888,
DRM_FORMAT_ABGR8888,
DRM_FORMAT_RGB565,
+ DRM_FORMAT_NV21,
+ DRM_FORMAT_NV12,
+ DRM_FORMAT_P010
};
static const uint32_t overlay_formats[] = {
@@ -1908,7 +1911,8 @@ int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
drm_plane_create_zpos_immutable_property(plane, 255);
}
- if (plane->type == DRM_PLANE_TYPE_PRIMARY &&
+ if ((plane->type == DRM_PLANE_TYPE_PRIMARY ||
+ plane->type == DRM_PLANE_TYPE_OVERLAY) &&
plane_cap &&
(plane_cap->pixel_format_support.nv12 ||
plane_cap->pixel_format_support.p010)) {
diff --git a/drivers/gpu/drm/amd/display/dc/bios/bios_parser.c b/drivers/gpu/drm/amd/display/dc/bios/bios_parser.c
index 242032c047ed..f947f82013c6 100644
--- a/drivers/gpu/drm/amd/display/dc/bios/bios_parser.c
+++ b/drivers/gpu/drm/amd/display/dc/bios/bios_parser.c
@@ -794,13 +794,11 @@ static enum bp_result bios_parser_external_encoder_control(
static enum bp_result bios_parser_dac_load_detection(
struct dc_bios *dcb,
- enum engine_id engine_id,
- struct graphics_object_id ext_enc_id)
+ enum engine_id engine_id)
{
struct bios_parser *bp = BP_FROM_DCB(dcb);
struct dc_context *ctx = dcb->ctx;
struct bp_load_detection_parameters bp_params = {0};
- struct bp_external_encoder_control ext_cntl = {0};
enum bp_result bp_result = BP_RESULT_UNSUPPORTED;
uint32_t bios_0_scratch;
uint32_t device_id_mask = 0;
@@ -826,13 +824,6 @@ static enum bp_result bios_parser_dac_load_detection(
bp_params.engine_id = engine_id;
bp_result = bp->cmd_tbl.dac_load_detection(bp, &bp_params);
- } else if (ext_enc_id.id) {
- if (!bp->cmd_tbl.external_encoder_control)
- return BP_RESULT_UNSUPPORTED;
-
- ext_cntl.action = EXTERNAL_ENCODER_CONTROL_DAC_LOAD_DETECT;
- ext_cntl.encoder_id = ext_enc_id;
- bp_result = bp->cmd_tbl.external_encoder_control(bp, &ext_cntl);
}
if (bp_result != BP_RESULT_OK)
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c
index 5ad66d873aad..e91636d033c7 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c
@@ -48,6 +48,7 @@
#include "dcn32/dcn32_clk_mgr.h"
#include "dcn35/dcn35_clk_mgr.h"
#include "dcn401/dcn401_clk_mgr.h"
+#include "dcn42/dcn42_clk_mgr.h"
int clk_mgr_helper_get_active_display_cnt(
struct dc *dc,
@@ -362,6 +363,18 @@ struct clk_mgr *dc_clk_mgr_create(struct dc_context *ctx, struct pp_smu_funcs *p
return &clk_mgr->base;
}
break;
+ case AMDGPU_FAMILY_GC_11_5_4: {
+ struct clk_mgr_dcn42 *clk_mgr = kzalloc(sizeof(*clk_mgr), GFP_KERNEL);
+
+ if (clk_mgr == NULL) {
+ BREAK_TO_DEBUGGER();
+ return NULL;
+ }
+
+ dcn42_clk_mgr_construct(ctx, clk_mgr, pp_smu, dccg);
+ return &clk_mgr->base.base;
+ }
+ break;
#endif /* CONFIG_DRM_AMD_DC_FP */
default:
ASSERT(0); /* Unknown Asic */
@@ -419,6 +432,9 @@ void dc_destroy_clk_mgr(struct clk_mgr *clk_mgr_base)
case AMDGPU_FAMILY_GC_12_0_0:
dcn401_clk_mgr_destroy(clk_mgr);
break;
+ case AMDGPU_FAMILY_GC_11_5_4:
+ dcn42_clk_mgr_destroy(clk_mgr);
+ break;
default:
break;
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr.c
index b0aba3a6f13c..b48522480dfd 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr.c
@@ -547,6 +547,7 @@ void dcn3_clk_mgr_construct(
/* in case we don't get a value from the register, use default */
if (clk_mgr->base.dentist_vco_freq_khz == 0)
clk_mgr->base.dentist_vco_freq_khz = 3650000;
+
/* Convert dprefclk units from MHz to KHz */
/* Value already divided by 10, some resolution lost */
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn42/dcn42_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn42/dcn42_clk_mgr.c
index 55434f046fa2..97c9f0ce13e4 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn42/dcn42_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn42/dcn42_clk_mgr.c
@@ -31,59 +31,19 @@
#include "link_service.h"
#include "logger_types.h"
+#include "clk/clk_15_0_0_offset.h"
+#include "clk/clk_15_0_0_sh_mask.h"
+#include "dcn/dcn_4_2_0_offset.h"
+#include "dcn/dcn_4_2_0_sh_mask.h"
+
#undef DC_LOGGER
#define DC_LOGGER \
- clk_mgr->base.base.ctx->logger
-
-
-#define DCN_BASE__INST0_SEG1 0x000000C0
-
-#define regCLK8_CLK2_BYPASS_CNTL 0x4c2a
-#define regCLK8_CLK2_BYPASS_CNTL_BASE_IDX 0
-#define CLK8_CLK2_BYPASS_CNTL__CLK2_BYPASS_SEL__SHIFT 0x0
-#define CLK8_CLK2_BYPASS_CNTL__CLK2_BYPASS_DIV__SHIFT 0x10
-#define CLK8_CLK2_BYPASS_CNTL__CLK2_BYPASS_SEL_MASK 0x00000007L
-#define CLK8_CLK2_BYPASS_CNTL__CLK2_BYPASS_DIV_MASK 0x000F0000L
-
-#define regDENTIST_DISPCLK_CNTL 0x0064
-#define regDENTIST_DISPCLK_CNTL_BASE_IDX 1
-
-// DENTIST_DISPCLK_CNTL
-#define DENTIST_DISPCLK_CNTL__DENTIST_DISPCLK_WDIVIDER__SHIFT 0x0
-#define DENTIST_DISPCLK_CNTL__DENTIST_DISPCLK_RDIVIDER__SHIFT 0x8
-#define DENTIST_DISPCLK_CNTL__DENTIST_DISPCLK_CHG_DONE__SHIFT 0x13
-#define DENTIST_DISPCLK_CNTL__DENTIST_DPPCLK_CHG_DONE__SHIFT 0x14
-#define DENTIST_DISPCLK_CNTL__DENTIST_DPPCLK_WDIVIDER__SHIFT 0x18
-#define DENTIST_DISPCLK_CNTL__DENTIST_DISPCLK_WDIVIDER_MASK 0x0000007FL
-#define DENTIST_DISPCLK_CNTL__DENTIST_DISPCLK_RDIVIDER_MASK 0x00007F00L
-#define DENTIST_DISPCLK_CNTL__DENTIST_DISPCLK_CHG_DONE_MASK 0x00080000L
-#define DENTIST_DISPCLK_CNTL__DENTIST_DPPCLK_CHG_DONE_MASK 0x00100000L
-#define DENTIST_DISPCLK_CNTL__DENTIST_DPPCLK_WDIVIDER_MASK 0x7F000000L
-#define mmDENTIST_DISPCLK_CNTL 0x0124
-#define mmCLK8_CLK_TICK_CNT_CONFIG_REG 0x1B851
-#define mmCLK8_CLK0_CURRENT_CNT 0x1B853
-#define mmCLK8_CLK1_CURRENT_CNT 0x1B854
-#define mmCLK8_CLK2_CURRENT_CNT 0x1B855
-#define mmCLK8_CLK3_CURRENT_CNT 0x1B856
-#define mmCLK8_CLK4_CURRENT_CNT 0x1B857
-
-
-#define mmCLK8_CLK0_BYPASS_CNTL 0x1B81A
-#define mmCLK8_CLK1_BYPASS_CNTL 0x1B822
-#define mmCLK8_CLK2_BYPASS_CNTL 0x1B82A
-#define mmCLK8_CLK3_BYPASS_CNTL 0x1B832
-#define mmCLK8_CLK4_BYPASS_CNTL 0x1B83A
-
-
-#define mmCLK8_CLK0_DS_CNTL 0x1B814
-#define mmCLK8_CLK1_DS_CNTL 0x1B81C
-#define mmCLK8_CLK2_DS_CNTL 0x1B824
-#define mmCLK8_CLK3_DS_CNTL 0x1B82C
-#define mmCLK8_CLK4_DS_CNTL 0x1B834
-
-
+ dc_logger
+#define DC_LOGGER_INIT(logger) \
+ struct dal_logger *dc_logger = logger
+#define DCN42_CLKIP_REFCLK 48000
#undef FN
#define FN(reg_name, field_name) \
@@ -92,16 +52,25 @@
#define REG(reg) \
(clk_mgr->regs->reg)
+// for DCN register access
+#define DCN_BASE__INST0_SEG0 0x00000012
+#define DCN_BASE__INST0_SEG1 0x000000C0
+
#define BASE_INNER(seg) DCN_BASE__INST0_SEG ## seg
#define BASE(seg) BASE_INNER(seg)
-#define SR(reg_name)\
- .reg_name = BASE(reg ## reg_name ## _BASE_IDX) + \
- reg ## reg_name
+#define SR(reg_name) \
+ .reg_name = BASE(reg ## reg_name ## _BASE_IDX) + reg ## reg_name
+
+// for CLKIP register access
+#define CLK_BASE__INST0_SEG0 0x00016C00
-#define CLK_SR_DCN42(reg_name)\
- .reg_name = mm ## reg_name
+#define CLK_BASE_INNER(seg) \
+ CLK_BASE__INST0_SEG ## seg
+
+#define CLK_SR_DCN42(reg_name) \
+ .reg_name = CLK_BASE(reg ## reg_name ## _BASE_IDX) + reg ## reg_name
static const struct clk_mgr_registers clk_mgr_regs_dcn42 = {
CLK_REG_LIST_DCN42()
@@ -115,26 +84,21 @@ static const struct clk_mgr_mask clk_mgr_mask_dcn42 = {
CLK_COMMON_MASK_SH_LIST_DCN42(_MASK)
};
-
-
#define TO_CLK_MGR_DCN42(clk_mgr_int)\
container_of(clk_mgr_int, struct clk_mgr_dcn42, base)
-int dcn42_get_active_display_cnt_wa(
- struct dc *dc,
- struct dc_state *context,
- int *all_active_disps)
+bool dcn42_has_active_display(struct dc *dc, const struct dc_state *context)
{
- int i, display_count = 0;
- bool tmds_present = false;
+ int i, active_count = 0;
for (i = 0; i < context->stream_count; i++) {
const struct dc_stream_state *stream = context->streams[i];
- if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A ||
- stream->signal == SIGNAL_TYPE_DVI_SINGLE_LINK ||
- stream->signal == SIGNAL_TYPE_DVI_DUAL_LINK)
- tmds_present = true;
+ /* Checking stream / link detection ensuring that PHY is active*/
+ if (dc_is_hdmi_signal(stream->signal) ||
+ dc_is_dvi_signal(stream->signal) ||
+ (dc_is_dp_signal(stream->signal) && !stream->dpms_off))
+ active_count++;
}
for (i = 0; i < dc->link_count; i++) {
@@ -143,15 +107,53 @@ int dcn42_get_active_display_cnt_wa(
/* abusing the fact that the dig and phy are coupled to see if the phy is enabled */
if (link->link_enc && link->link_enc->funcs->is_dig_enabled &&
link->link_enc->funcs->is_dig_enabled(link->link_enc))
- display_count++;
+ active_count++;
+ }
+
+ return active_count > 0;
+}
+
+static uint32_t dcn42_get_clock_freq_from_clkip(struct clk_mgr *clk_mgr_base, enum clock_type clock)
+{
+ struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base);
+ uint64_t clock_freq_mhz = 0;
+ uint32_t timer_threshold = 0;
+
+ // always safer to read the timer threshold instead of using cached value
+ REG_GET(CLK8_CLK_TICK_CNT_CONFIG_REG, TIMER_THRESHOLD, &timer_threshold);
+
+ if (timer_threshold == 0) {
+ BREAK_TO_DEBUGGER();
+ return 0;
+ }
+
+ switch (clock) {
+ case clock_type_dispclk:
+ clock_freq_mhz = REG_READ(CLK8_CLK0_CURRENT_CNT);
+ break;
+ case clock_type_dppclk:
+ clock_freq_mhz = REG_READ(CLK8_CLK1_CURRENT_CNT);
+ break;
+ case clock_type_dprefclk:
+ clock_freq_mhz = REG_READ(CLK8_CLK2_CURRENT_CNT);
+ break;
+ case clock_type_dcfclk:
+ clock_freq_mhz = REG_READ(CLK8_CLK3_CURRENT_CNT);
+ break;
+ case clock_type_dtbclk:
+ clock_freq_mhz = REG_READ(CLK8_CLK4_CURRENT_CNT);
+ break;
+ default:
+ break;
}
- if (all_active_disps != NULL)
- *all_active_disps = display_count;
- /* WA for hang on HDMI after display off back on*/
- if (display_count == 0 && tmds_present)
- display_count = 1;
- return display_count;
+ clock_freq_mhz *= DCN42_CLKIP_REFCLK;
+ clock_freq_mhz = div_u64(clock_freq_mhz, timer_threshold);
+
+ // there are no DCN clocks over 0xFFFFFFFF MHz
+ ASSERT(clock_freq_mhz <= 0xFFFFFFFF);
+
+ return (uint32_t)clock_freq_mhz;
}
void dcn42_update_clocks_update_dtb_dto(struct clk_mgr_internal *clk_mgr,
@@ -213,22 +215,18 @@ void dcn42_update_clocks(struct clk_mgr *clk_mgr_base,
struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base);
struct dc_clocks *new_clocks = &context->bw_ctx.bw.dcn.clk;
struct dc *dc = clk_mgr_base->ctx->dc;
- int display_count = 0;
bool update_dppclk = false;
bool update_dispclk = false;
bool dpp_clock_lowered = false;
- int all_active_disps = 0;
+ bool has_active_display;
if (dc->work_arounds.skip_clock_update)
return;
- display_count = dcn42_get_active_display_cnt_wa(dc, context, &all_active_disps);
+ has_active_display = dcn42_has_active_display(dc, context);
- /*dml21 issue*/
- ASSERT(new_clocks->dtbclk_en && new_clocks->ref_dtbclk_khz > 590000); //remove this section if assert is hit
if (new_clocks->dtbclk_en && new_clocks->ref_dtbclk_khz < 590000)
new_clocks->ref_dtbclk_khz = 600000;
-
/*
* if it is safe to lower, but we are already in the lower state, we don't have to do anything
* also if safe to lower is false, we just go in the higher state
@@ -248,7 +246,7 @@ void dcn42_update_clocks(struct clk_mgr *clk_mgr_base,
/* check that we're not already in lower */
if (clk_mgr_base->clks.pwr_state != DCN_PWR_STATE_LOW_POWER) {
/* if we can go lower, go lower */
- if (display_count == 0)
+ if (has_active_display == false)
clk_mgr_base->clks.pwr_state = DCN_PWR_STATE_LOW_POWER;
}
} else {
@@ -262,9 +260,7 @@ void dcn42_update_clocks(struct clk_mgr *clk_mgr_base,
dcn42_update_clocks_update_dtb_dto(clk_mgr, context, new_clocks->ref_dtbclk_khz);
dcn42_smu_set_dtbclk(clk_mgr, true);
- if (clk_mgr_base->boot_snapshot.timer_threhold)
- actual_dtbclk = REG_READ(CLK8_CLK4_CURRENT_CNT) / (clk_mgr_base->boot_snapshot.timer_threhold / 48000);
-
+ actual_dtbclk = dcn42_get_clock_freq_from_clkip(clk_mgr_base, clock_type_dtbclk);
if (actual_dtbclk > 590000) {
clk_mgr_base->clks.ref_dtbclk_khz = new_clocks->ref_dtbclk_khz;
@@ -308,7 +304,7 @@ void dcn42_update_clocks(struct clk_mgr *clk_mgr_base,
}
if (should_set_clock(safe_to_lower, new_clocks->dispclk_khz, clk_mgr_base->clks.dispclk_khz) &&
- (new_clocks->dispclk_khz > 0 || (safe_to_lower && display_count == 0))) {
+ (new_clocks->dispclk_khz > 0 || (safe_to_lower && has_active_display == false))) {
int requested_dispclk_khz = new_clocks->dispclk_khz;
dcn35_disable_otg_wa(clk_mgr_base, context, safe_to_lower, true);
@@ -386,34 +382,27 @@ bool dcn42_are_clock_states_equal(struct dc_clocks *a,
static void dcn42_dump_clk_registers_internal(struct dcn42_clk_internal *internal, struct clk_mgr *clk_mgr_base)
{
struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base);
- uint32_t ratio = 1;
-
- internal->CLK8_CLK_TICK_CNT__TIMER_THRESHOLD = REG_READ(CLK8_CLK_TICK_CNT_CONFIG_REG) & 0xFFFFFF;
-
- ratio = internal->CLK8_CLK_TICK_CNT__TIMER_THRESHOLD / 48000;
- ASSERT(ratio != 0);
-
- if (ratio) {
- // read dcf deep sleep divider
- internal->CLK8_CLK0_DS_CNTL = REG_READ(CLK8_CLK0_DS_CNTL);
- internal->CLK8_CLK3_DS_CNTL = REG_READ(CLK8_CLK3_DS_CNTL);
- // read dispclk
- internal->CLK8_CLK0_CURRENT_CNT = REG_READ(CLK8_CLK0_CURRENT_CNT) / ratio;
- internal->CLK8_CLK0_BYPASS_CNTL = REG_READ(CLK8_CLK0_BYPASS_CNTL);
- // read dppclk
- internal->CLK8_CLK1_CURRENT_CNT = REG_READ(CLK8_CLK1_CURRENT_CNT) / ratio;
- internal->CLK8_CLK1_BYPASS_CNTL = REG_READ(CLK8_CLK1_BYPASS_CNTL);
- // read dprefclk
- internal->CLK8_CLK2_CURRENT_CNT = REG_READ(CLK8_CLK2_CURRENT_CNT) / ratio;
- internal->CLK8_CLK2_BYPASS_CNTL = REG_READ(CLK8_CLK2_BYPASS_CNTL);
- // read dcfclk
- internal->CLK8_CLK3_CURRENT_CNT = REG_READ(CLK8_CLK3_CURRENT_CNT) / ratio;
- internal->CLK8_CLK3_BYPASS_CNTL = REG_READ(CLK8_CLK3_BYPASS_CNTL);
- // read dtbclk
- internal->CLK8_CLK4_CURRENT_CNT = REG_READ(CLK8_CLK4_CURRENT_CNT) / ratio;
- internal->CLK8_CLK4_BYPASS_CNTL = REG_READ(CLK8_CLK4_BYPASS_CNTL);
- }
+ REG_GET(CLK8_CLK_TICK_CNT_CONFIG_REG, TIMER_THRESHOLD, &internal->CLK8_CLK_TICK_CNT__TIMER_THRESHOLD);
+
+ // read dcf deep sleep divider
+ internal->CLK8_CLK0_DS_CNTL = REG_READ(CLK8_CLK0_DS_CNTL);
+ internal->CLK8_CLK3_DS_CNTL = REG_READ(CLK8_CLK3_DS_CNTL);
+ // read dispclk
+ internal->CLK8_CLK0_CURRENT_CNT = dcn42_get_clock_freq_from_clkip(clk_mgr_base, clock_type_dispclk);
+ internal->CLK8_CLK0_BYPASS_CNTL = REG_READ(CLK8_CLK0_BYPASS_CNTL);
+ // read dppclk
+ internal->CLK8_CLK1_CURRENT_CNT = dcn42_get_clock_freq_from_clkip(clk_mgr_base, clock_type_dppclk);
+ internal->CLK8_CLK1_BYPASS_CNTL = REG_READ(CLK8_CLK1_BYPASS_CNTL);
+ // read dprefclk
+ internal->CLK8_CLK2_CURRENT_CNT = dcn42_get_clock_freq_from_clkip(clk_mgr_base, clock_type_dprefclk);
+ internal->CLK8_CLK2_BYPASS_CNTL = REG_READ(CLK8_CLK2_BYPASS_CNTL);
+ // read dcfclk
+ internal->CLK8_CLK3_CURRENT_CNT = dcn42_get_clock_freq_from_clkip(clk_mgr_base, clock_type_dcfclk);
+ internal->CLK8_CLK3_BYPASS_CNTL = REG_READ(CLK8_CLK3_BYPASS_CNTL);
+ // read dtbclk
+ internal->CLK8_CLK4_CURRENT_CNT = dcn42_get_clock_freq_from_clkip(clk_mgr_base, clock_type_dtbclk);
+ internal->CLK8_CLK4_BYPASS_CNTL = REG_READ(CLK8_CLK4_BYPASS_CNTL);
}
static void dcn42_dump_clk_registers(struct clk_state_registers_and_bypass *regs_and_bypass,
@@ -422,8 +411,11 @@ static void dcn42_dump_clk_registers(struct clk_state_registers_and_bypass *regs
struct dcn42_clk_internal internal = {0};
char *bypass_clks[5] = {"0x0 DFS", "0x1 REFCLK", "0x2 ERROR", "0x3 400 FCH", "0x4 600 FCH"};
+ DC_LOGGER_INIT(clk_mgr->base.base.ctx->logger);
+ (void)dc_logger;
+
dcn42_dump_clk_registers_internal(&internal, &clk_mgr->base.base);
- regs_and_bypass->timer_threhold = internal.CLK8_CLK_TICK_CNT__TIMER_THRESHOLD;
+ regs_and_bypass->timer_threshold = internal.CLK8_CLK_TICK_CNT__TIMER_THRESHOLD;
regs_and_bypass->dcfclk = internal.CLK8_CLK3_CURRENT_CNT / 10;
regs_and_bypass->dcf_deep_sleep_divider = internal.CLK8_CLK3_DS_CNTL / 10;
regs_and_bypass->dcf_deep_sleep_allow = internal.CLK8_CLK3_DS_CNTL & 0x10; /*bit 4: CLK0_ALLOW_DS*/
@@ -432,18 +424,10 @@ static void dcn42_dump_clk_registers(struct clk_state_registers_and_bypass *regs
regs_and_bypass->dppclk = internal.CLK8_CLK1_CURRENT_CNT / 10;
regs_and_bypass->dtbclk = internal.CLK8_CLK4_CURRENT_CNT / 10;
- regs_and_bypass->dppclk_bypass = internal.CLK8_CLK1_BYPASS_CNTL & 0x0007;
- if (regs_and_bypass->dppclk_bypass > 4)
- regs_and_bypass->dppclk_bypass = 0;
- regs_and_bypass->dcfclk_bypass = internal.CLK8_CLK3_BYPASS_CNTL & 0x0007;
- if (regs_and_bypass->dcfclk_bypass > 4)
- regs_and_bypass->dcfclk_bypass = 0;
- regs_and_bypass->dispclk_bypass = internal.CLK8_CLK0_BYPASS_CNTL & 0x0007;
- if (regs_and_bypass->dispclk_bypass > 4)
- regs_and_bypass->dispclk_bypass = 0;
- regs_and_bypass->dprefclk_bypass = internal.CLK8_CLK2_BYPASS_CNTL & 0x0007;
- if (regs_and_bypass->dprefclk_bypass > 4)
- regs_and_bypass->dprefclk_bypass = 0;
+ regs_and_bypass->dispclk_bypass = get_reg_field_value(internal.CLK8_CLK0_BYPASS_CNTL, CLK8_CLK0_BYPASS_CNTL, CLK0_BYPASS_SEL);
+ regs_and_bypass->dppclk_bypass = get_reg_field_value(internal.CLK8_CLK1_BYPASS_CNTL, CLK8_CLK1_BYPASS_CNTL, CLK1_BYPASS_SEL);
+ regs_and_bypass->dprefclk_bypass = get_reg_field_value(internal.CLK8_CLK2_BYPASS_CNTL, CLK8_CLK2_BYPASS_CNTL, CLK2_BYPASS_SEL);
+ regs_and_bypass->dcfclk_bypass = get_reg_field_value(internal.CLK8_CLK3_BYPASS_CNTL, CLK8_CLK3_BYPASS_CNTL, CLK3_BYPASS_SEL);
if (clk_mgr->base.base.ctx->dc->debug.pstate_enabled) {
DC_LOG_SMU("clk_type,clk_value,deepsleep_cntl,deepsleep_allow,bypass\n");
@@ -467,7 +451,6 @@ static void dcn42_dump_clk_registers(struct clk_state_registers_and_bypass *regs
// REGISTER VALUES
DC_LOG_SMU("reg_name,value,clk_type\n");
-
DC_LOG_SMU("CLK1_CLK3_CURRENT_CNT,%d,dcfclk\n",
internal.CLK8_CLK3_CURRENT_CNT);
@@ -588,6 +571,9 @@ void dcn42_init_clocks(struct clk_mgr *clk_mgr_base)
struct clk_mgr_dcn42 *clk_mgr = TO_CLK_MGR_DCN42(clk_mgr_int);
struct dcn42_smu_dpm_clks smu_dpm_clks = { 0 };
+ DC_LOGGER_INIT(clk_mgr_base->ctx->logger);
+ (void)dc_logger;
+
init_clk_states(clk_mgr_base);
// to adjust dp_dto reference clock if ssc is enable otherwise to apply dprefclk
@@ -597,6 +583,7 @@ void dcn42_init_clocks(struct clk_mgr *clk_mgr_base)
else
clk_mgr_base->dp_dto_source_clock_in_khz = clk_mgr_base->dprefclk_khz;
+ DC_LOG_SMU("dp_dto_source_clock %d, dprefclk %d\n", clk_mgr_base->dp_dto_source_clock_in_khz, clk_mgr_base->dprefclk_khz);
dcn42_dump_clk_registers(&clk_mgr_base->boot_snapshot, clk_mgr);
clk_mgr_base->clks.ref_dtbclk_khz = clk_mgr_base->boot_snapshot.dtbclk * 10;
@@ -605,6 +592,12 @@ void dcn42_init_clocks(struct clk_mgr *clk_mgr_base)
clk_mgr_base->clks.dtbclk_en = true;
}
+ if (clk_mgr_base->bw_params->clk_table.num_entries_per_clk.num_dcfclk_levels != 0) {
+		/* Clock table already populated; skip re-fetching it and re-notifying PMFW of watermark ranges. */
+ DC_LOG_SMU("skip to get dpm_clks from pmfw from resume and acr\n");
+ return;
+ }
+
smu_dpm_clks.dpm_clks = (DpmClocks_t_dcn42 *)dm_helpers_allocate_gpu_mem(
clk_mgr_base->ctx,
DC_MEM_ALLOC_TYPE_GART,
@@ -711,10 +704,9 @@ void dcn42_init_clocks(struct clk_mgr *clk_mgr_base)
/* DTBCLK*/
clk_mgr_base->bw_params->clk_table.entries[0].dtbclk_mhz = clk_mgr_base->clks.ref_dtbclk_khz / 1000;
clk_mgr_base->bw_params->clk_table.num_entries_per_clk.num_dtbclk_levels = 1;
-
/* Refresh bounding box */
clk_mgr_base->ctx->dc->res_pool->funcs->update_bw_bounding_box(
- clk_mgr_base->ctx->dc, clk_mgr_base->bw_params);
+ clk_mgr_base->ctx->dc, clk_mgr_base->bw_params);
}
}
if (smu_dpm_clks.dpm_clks && smu_dpm_clks.mc_address.quad_part != 0)
@@ -826,7 +818,6 @@ static void dcn42_read_ss_info_from_lut(struct clk_mgr_internal *clk_mgr)
}
}
-/* Exposed for dcn42b reuse */
void dcn42_build_watermark_ranges(struct clk_bw_params *bw_params, struct dcn42_watermarks *table)
{
int i, num_valid_sets;
@@ -885,18 +876,42 @@ void dcn42_build_watermark_ranges(struct clk_bw_params *bw_params, struct dcn42_
void dcn42_notify_wm_ranges(struct clk_mgr *clk_mgr_base)
{
+ int i = 0;
+ struct dcn42_watermarks *table = NULL;
struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base);
struct clk_mgr_dcn42 *clk_mgr_dcn42 = TO_CLK_MGR_DCN42(clk_mgr);
- struct dcn42_watermarks *table = clk_mgr_dcn42->smu_wm_set.wm_set;
if (!clk_mgr->smu_ver)
return;
+	/* Watermark ranges were already sent to the SMU once; skip resending. */
+ if (clk_mgr_base->bw_params->wm_table.entries[WM_A].valid == true)
+ return;
+ clk_mgr_dcn42->smu_wm_set.wm_set = (struct dcn42_watermarks *)dm_helpers_allocate_gpu_mem(
+ clk_mgr->base.ctx,
+ DC_MEM_ALLOC_TYPE_GART,
+ sizeof(struct dcn42_watermarks),
+ &clk_mgr_dcn42->smu_wm_set.mc_address.quad_part);
+
+ ASSERT(clk_mgr_dcn42->smu_wm_set.wm_set);
+
+ table = clk_mgr_dcn42->smu_wm_set.wm_set;
if (!table || clk_mgr_dcn42->smu_wm_set.mc_address.quad_part == 0)
return;
memset(table, 0, sizeof(*table));
+	/* As on previous ASICs, mark watermark table entries valid before building the watermark ranges. */
+ for (i = 0; i < WM_SET_COUNT; i++) {
+ clk_mgr_base->bw_params->wm_table.entries[i].wm_inst = i;
+ if (i >= clk_mgr_base->bw_params->clk_table.num_entries) {
+ clk_mgr_base->bw_params->wm_table.entries[i].valid = false;
+ continue;
+ }
+ clk_mgr_base->bw_params->wm_table.entries[i].wm_type = WM_TYPE_PSTATE_CHG;
+ clk_mgr_base->bw_params->wm_table.entries[i].valid = true;
+ }
+	/* dcn42_build_watermark_ranges() consults these valid flags. */
dcn42_build_watermark_ranges(clk_mgr_base->bw_params, table);
dcn42_smu_set_dram_addr_high(clk_mgr,
@@ -904,18 +919,21 @@ void dcn42_notify_wm_ranges(struct clk_mgr *clk_mgr_base)
dcn42_smu_set_dram_addr_low(clk_mgr,
clk_mgr_dcn42->smu_wm_set.mc_address.low_part);
dcn42_smu_transfer_wm_table_dram_2_smu(clk_mgr);
+
+ if (clk_mgr_dcn42->smu_wm_set.wm_set && clk_mgr_dcn42->smu_wm_set.mc_address.quad_part != 0)
+ dm_helpers_free_gpu_mem(clk_mgr->base.ctx, DC_MEM_ALLOC_TYPE_GART,
+ clk_mgr_dcn42->smu_wm_set.wm_set);
+
}
void dcn42_set_low_power_state(struct clk_mgr *clk_mgr_base)
{
- int display_count;
struct dc *dc = clk_mgr_base->ctx->dc;
struct dc_state *context = dc->current_state;
if (clk_mgr_base->clks.pwr_state != DCN_PWR_STATE_LOW_POWER) {
- display_count = dcn42_get_active_display_cnt_wa(dc, context, NULL);
/* if we can go lower, go lower */
- if (display_count == 0)
+ if (dcn42_has_active_display(dc, context) == false)
clk_mgr_base->clks.pwr_state = DCN_PWR_STATE_LOW_POWER;
}
@@ -1096,14 +1114,7 @@ void dcn42_clk_mgr_construct(
clk_mgr->base.dprefclk_ss_divider = 1000;
clk_mgr->base.ss_on_dprefclk = false;
clk_mgr->base.dfs_ref_freq_khz = 48000; /*sync with pmfw*/
-
- clk_mgr->smu_wm_set.wm_set = (struct dcn42_watermarks *)dm_helpers_allocate_gpu_mem(
- clk_mgr->base.base.ctx,
- DC_MEM_ALLOC_TYPE_GART,
- sizeof(struct dcn42_watermarks),
- &clk_mgr->smu_wm_set.mc_address.quad_part);
-
- ASSERT(clk_mgr->smu_wm_set.wm_set);
+ clk_mgr->base.base.clks.ref_dtbclk_khz = 600000;
/* Changed from DCN3.2_clock_frequency doc to match
* dcn32_dump_clk_registers from 4 * dentist_vco_freq_khz /
@@ -1112,6 +1123,9 @@ void dcn42_clk_mgr_construct(
clk_mgr->base.base.dprefclk_khz = 600000;
clk_mgr->base.smu_present = false;
+ clk_mgr->base.smu_ver = dcn42_smu_get_pmfw_version(&clk_mgr->base);
+ if (clk_mgr->base.smu_ver && clk_mgr->base.smu_ver != -1)
+ clk_mgr->base.smu_present = true;
if (ctx->dc_bios->integrated_info) {
clk_mgr->base.base.dentist_vco_freq_khz = ctx->dc_bios->integrated_info->dentist_vco_freq;
@@ -1122,7 +1136,9 @@ void dcn42_clk_mgr_construct(
dcn42_bw_params.wm_table = ddr5_wm_table;
dcn42_bw_params.vram_type = ctx->dc_bios->integrated_info->memory_type;
dcn42_bw_params.dram_channel_width_bytes = ctx->dc_bios->integrated_info->memory_type == 0x22 ? 8 : 4;
- dcn42_bw_params.num_channels = ctx->dc_bios->integrated_info->ma_channel_number ? ctx->dc_bios->integrated_info->ma_channel_number : 4;
+ dcn42_bw_params.num_channels = ctx->dc_bios->integrated_info->ma_channel_number ? ctx->dc_bios->integrated_info->ma_channel_number : 1;
+ clk_mgr->base.base.dprefclk_khz = dcn42_smu_get_dprefclk(&clk_mgr->base);
+ clk_mgr->base.base.clks.ref_dtbclk_khz = dcn42_smu_get_dtbclk(&clk_mgr->base);
}
/* in case we don't get a value from the BIOS, use default */
if (clk_mgr->base.base.dentist_vco_freq_khz == 0)
@@ -1131,9 +1147,6 @@ void dcn42_clk_mgr_construct(
/* Saved clocks configured at boot for debug purposes */
dcn42_dump_clk_registers(&clk_mgr->base.base.boot_snapshot, clk_mgr);
- if (clk_mgr->base.smu_present)
- clk_mgr->base.base.dprefclk_khz = dcn42_smu_get_dprefclk(&clk_mgr->base);
- clk_mgr->base.base.clks.ref_dtbclk_khz = 600000;
dce_clock_read_ss_info(&clk_mgr->base);
/*when clk src is from FCH, it could have ss, same clock src as DPREF clk*/
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn42/dcn42_clk_mgr.h b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn42/dcn42_clk_mgr.h
index 99fcdb602c62..5ad027a9edaf 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn42/dcn42_clk_mgr.h
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn42/dcn42_clk_mgr.h
@@ -59,7 +59,6 @@ void dcn42_clk_mgr_construct(struct dc_context *ctx,
void dcn42_clk_mgr_destroy(struct clk_mgr_internal *clk_mgr_int);
-/* Exposed for dcn42b reuse */
void dcn42_init_single_clock(unsigned int *entry_0,
uint32_t *smu_entry_0,
uint8_t num_levels);
@@ -76,4 +75,5 @@ int dcn42_get_active_display_cnt_wa(struct dc *dc, struct dc_state *context, int
void dcn42_update_clocks_update_dpp_dto(struct clk_mgr_internal *clk_mgr, struct dc_state *context, bool safe_to_lower);
void dcn42_update_clocks_update_dtb_dto(struct clk_mgr_internal *clk_mgr, struct dc_state *context, int ref_dtbclk_khz);
bool dcn42_is_spll_ssc_enabled(struct clk_mgr *clk_mgr_base);
+bool dcn42_has_active_display(struct dc *dc, const struct dc_state *context);
#endif //__DCN42_CLK_MGR_H__
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c
index 615bf2a01389..31589f22aae0 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc.c
@@ -2895,16 +2895,27 @@ static struct surface_update_descriptor det_surface_update(
elevate_update_type(&overall_type, UPDATE_TYPE_FAST, LOCK_DESCRIPTOR_STREAM);
}
- if (u->blend_tf || (u->gamma && dce_use_lut(u->plane_info ? u->plane_info->format : u->surface->format))) {
+ if (u->cm || (u->gamma && dce_use_lut(u->plane_info ? u->plane_info->format : u->surface->format))) {
update_flags->bits.gamma_change = 1;
elevate_update_type(&overall_type, UPDATE_TYPE_FAST, LOCK_DESCRIPTOR_STREAM);
}
- if (u->lut3d_func || u->func_shaper) {
+ if (u->cm && (u->cm->flags.bits.lut3d_enable || u->surface->cm.flags.bits.lut3d_enable)) {
update_flags->bits.lut_3d = 1;
elevate_update_type(&overall_type, UPDATE_TYPE_FAST, LOCK_DESCRIPTOR_STREAM);
}
+ if (u->cm && u->cm->flags.bits.lut3d_dma_enable != u->surface->cm.flags.bits.lut3d_dma_enable &&
+ u->cm->flags.bits.lut3d_enable && u->surface->cm.flags.bits.lut3d_enable) {
+ /* Toggling 3DLUT loading between DMA and Host is illegal */
+ BREAK_TO_DEBUGGER();
+ }
+
+ if (u->cm && u->cm->flags.bits.lut3d_enable && !u->cm->flags.bits.lut3d_dma_enable) {
+ /* Host loading 3DLUT requires full update but only stream lock */
+ elevate_update_type(&overall_type, UPDATE_TYPE_FULL, LOCK_DESCRIPTOR_STREAM);
+ }
+
if (u->hdr_mult.value)
if (u->hdr_mult.value != u->surface->hdr_mult.value) {
// TODO: Should be fast?
@@ -2919,24 +2930,15 @@ static struct surface_update_descriptor det_surface_update(
elevate_update_type(&overall_type, UPDATE_TYPE_FULL, LOCK_DESCRIPTOR_GLOBAL);
}
- if (u->cm2_params) {
- if (u->cm2_params->component_settings.shaper_3dlut_setting != u->surface->mcm_shaper_3dlut_setting
- || u->cm2_params->component_settings.lut1d_enable != u->surface->mcm_lut1d_enable
- || u->cm2_params->cm2_luts.lut3d_data.lut3d_src != u->surface->mcm_luts.lut3d_data.lut3d_src) {
- update_flags->bits.mcm_transfer_function_enable_change = 1;
- elevate_update_type(&overall_type, UPDATE_TYPE_FULL, LOCK_DESCRIPTOR_GLOBAL);
- }
- }
-
- if (update_flags->bits.lut_3d &&
- u->surface->mcm_luts.lut3d_data.lut3d_src != DC_CM2_TRANSFER_FUNC_SOURCE_VIDMEM) {
- elevate_update_type(&overall_type, UPDATE_TYPE_FULL, LOCK_DESCRIPTOR_GLOBAL);
+ if (u->cm_hist_control) {
+ update_flags->bits.cm_hist_change = 1;
+ elevate_update_type(&overall_type, UPDATE_TYPE_FAST, LOCK_DESCRIPTOR_STREAM);
}
-
if (check_config->enable_legacy_fast_update &&
(update_flags->bits.gamma_change ||
update_flags->bits.gamut_remap_change ||
update_flags->bits.input_csc_change ||
+ update_flags->bits.cm_hist_change ||
update_flags->bits.coeff_reduction_change)) {
elevate_update_type(&overall_type, UPDATE_TYPE_FULL, LOCK_DESCRIPTOR_GLOBAL);
}
@@ -3168,6 +3170,11 @@ static void copy_surface_update_to_plane(
surface->gamma_correction.type =
srf_update->gamma->type;
}
+ if (srf_update->cm_hist_control) {
+ memcpy(&surface->cm_hist_control,
+ srf_update->cm_hist_control,
+ sizeof(surface->cm_hist_control));
+ }
if (srf_update->in_transfer_func) {
surface->in_transfer_func.sdr_ref_white_level =
@@ -3181,24 +3188,12 @@ static void copy_surface_update_to_plane(
sizeof(struct dc_transfer_func_distributed_points));
}
- if (srf_update->cm2_params) {
- surface->mcm_shaper_3dlut_setting = srf_update->cm2_params->component_settings.shaper_3dlut_setting;
- surface->mcm_lut1d_enable = srf_update->cm2_params->component_settings.lut1d_enable;
- surface->mcm_luts = srf_update->cm2_params->cm2_luts;
+ /* Shaper, 3DLUT, 1DLUT */
+ if (srf_update->cm) {
+ memcpy(&surface->cm, srf_update->cm,
+ sizeof(surface->cm));
}
- if (srf_update->func_shaper) {
- memcpy(&surface->in_shaper_func, srf_update->func_shaper,
- sizeof(surface->in_shaper_func));
-
- if (surface->mcm_shaper_3dlut_setting >= DC_CM2_SHAPER_3DLUT_SETTING_ENABLE_SHAPER)
- surface->mcm_luts.shaper = &surface->in_shaper_func;
- }
-
- if (srf_update->lut3d_func)
- memcpy(&surface->lut3d_func, srf_update->lut3d_func,
- sizeof(surface->lut3d_func));
-
if (srf_update->hdr_mult.value)
surface->hdr_mult =
srf_update->hdr_mult;
@@ -3207,17 +3202,6 @@ static void copy_surface_update_to_plane(
surface->sdr_white_level_nits =
srf_update->sdr_white_level_nits;
- if (srf_update->blend_tf) {
- memcpy(&surface->blend_tf, srf_update->blend_tf,
- sizeof(surface->blend_tf));
-
- if (surface->mcm_lut1d_enable)
- surface->mcm_luts.lut1d_func = &surface->blend_tf;
- }
-
- if (srf_update->cm2_params || srf_update->blend_tf)
- surface->lut_bank_a = !surface->lut_bank_a;
-
if (srf_update->input_csc_color_matrix)
surface->input_csc_color_matrix =
*srf_update->input_csc_color_matrix;
@@ -4501,11 +4485,9 @@ static void commit_planes_for_stream(struct dc *dc,
if (!should_update_pipe_for_plane(context, pipe_ctx, plane_state))
continue;
- if (srf_updates[i].cm2_params &&
- srf_updates[i].cm2_params->cm2_luts.lut3d_data.lut3d_src ==
- DC_CM2_TRANSFER_FUNC_SOURCE_VIDMEM &&
- srf_updates[i].cm2_params->component_settings.shaper_3dlut_setting ==
- DC_CM2_SHAPER_3DLUT_SETTING_ENABLE_SHAPER_3DLUT &&
+ if (srf_updates[i].cm &&
+ srf_updates[i].cm->flags.bits.lut3d_enable &&
+ srf_updates[i].cm->flags.bits.lut3d_dma_enable &&
dc->hwss.trigger_3dlut_dma_load)
dc->hwss.trigger_3dlut_dma_load(dc, pipe_ctx);
@@ -5073,6 +5055,7 @@ void populate_fast_updates(struct dc_fast_update *fast_update,
fast_update[i].input_csc_color_matrix = srf_updates[i].input_csc_color_matrix;
fast_update[i].coeff_reduction_factor = srf_updates[i].coeff_reduction_factor;
fast_update[i].cursor_csc_color_matrix = srf_updates[i].cursor_csc_color_matrix;
+ fast_update[i].cm_hist_control = srf_updates[i].cm_hist_control;
}
}
@@ -5090,6 +5073,7 @@ static bool fast_updates_exist(const struct dc_fast_update *fast_update, int sur
fast_update[i].gamut_remap_matrix ||
fast_update[i].input_csc_color_matrix ||
fast_update[i].cursor_csc_color_matrix ||
+ fast_update[i].cm_hist_control ||
fast_update[i].coeff_reduction_factor)
return true;
}
@@ -5110,6 +5094,7 @@ bool fast_nonaddr_updates_exist(struct dc_fast_update *fast_update, int surface_
fast_update[i].gamma ||
fast_update[i].gamut_remap_matrix ||
fast_update[i].coeff_reduction_factor ||
+ fast_update[i].cm_hist_control ||
fast_update[i].cursor_csc_color_matrix)
return true;
}
@@ -5151,6 +5136,12 @@ static bool full_update_required(
const struct dc_stream_update *stream_update,
const struct dc_stream_state *stream)
{
+ const union dc_plane_cm_flags blend_only_flags = {
+ .bits = {
+ .blend_enable = 1,
+ }
+ };
+
if (full_update_required_weak(dc, srf_updates, surface_count, stream_update, stream))
return true;
@@ -5163,14 +5154,12 @@ static bool full_update_required(
(srf_updates[i].sdr_white_level_nits &&
srf_updates[i].sdr_white_level_nits != srf_updates->surface->sdr_white_level_nits) ||
srf_updates[i].in_transfer_func ||
- srf_updates[i].func_shaper ||
- srf_updates[i].lut3d_func ||
srf_updates[i].surface->force_full_update ||
(srf_updates[i].flip_addr &&
srf_updates[i].flip_addr->address.tmz_surface != srf_updates[i].surface->address.tmz_surface) ||
- (srf_updates[i].cm2_params &&
- (srf_updates[i].cm2_params->component_settings.shaper_3dlut_setting != srf_updates[i].surface->mcm_shaper_3dlut_setting ||
- srf_updates[i].cm2_params->component_settings.lut1d_enable != srf_updates[i].surface->mcm_lut1d_enable))))
+ (srf_updates[i].cm &&
+ ((srf_updates[i].cm->flags.all != blend_only_flags.all && srf_updates[i].cm->flags.all != 0) ||
+ (srf_updates[i].surface->cm.flags.all != blend_only_flags.all && srf_updates[i].surface->cm.flags.all != 0)))))
return true;
}
@@ -5945,6 +5934,7 @@ bool dc_is_dmub_outbox_supported(struct dc *dc)
case AMDGPU_FAMILY_GC_11_0_1:
case AMDGPU_FAMILY_GC_11_5_0:
+ case AMDGPU_FAMILY_GC_11_5_4:
if (!dc->debug.dpia_debug.bits.disable_dpia)
return true;
break;
@@ -6897,7 +6887,7 @@ bool dc_capture_register_software_state(struct dc *dc, struct dc_register_softwa
struct dc_plane_state *plane_state = pipe_ctx->plane_state;
/* MPCC blending tree and mode control - capture actual blend configuration */
- state->mpc.mpcc_mode[i] = (plane_state->blend_tf.type != TF_TYPE_BYPASS) ? 1 : 0;
+ state->mpc.mpcc_mode[i] = (plane_state->cm.blend_func.type != TF_TYPE_BYPASS) ? 1 : 0;
state->mpc.mpcc_alpha_blend_mode[i] = plane_state->per_pixel_alpha ? 1 : 0;
state->mpc.mpcc_alpha_multiplied_mode[i] = plane_state->pre_multiplied_alpha ? 1 : 0;
state->mpc.mpcc_blnd_active_overlap_only[i] = 0; /* Default - no overlap restriction */
@@ -7295,6 +7285,23 @@ static bool update_planes_and_stream_prepare_v3(
ASSERT(scratch->flow == UPDATE_V3_FLOW_INVALID);
dc_exit_ips_for_hw_access(scratch->dc);
+ /* HWSS path determination needs to be done prior to updating the surface and stream states. */
+ struct dc_fast_update fast_update[MAX_SURFACES] = { 0 };
+
+ populate_fast_updates(fast_update,
+ scratch->surface_updates,
+ scratch->surface_count,
+ scratch->stream_update);
+
+ const bool is_hwss_fast_path_only =
+ fast_update_only(scratch->dc,
+ fast_update,
+ scratch->surface_updates,
+ scratch->surface_count,
+ scratch->stream_update,
+ scratch->stream) &&
+ !scratch->dc->check_config.enable_legacy_fast_update;
+
if (!update_planes_and_stream_state(
scratch->dc,
scratch->surface_updates,
@@ -7310,26 +7317,7 @@ static bool update_planes_and_stream_prepare_v3(
if (scratch->new_context == scratch->dc->current_state) {
ASSERT(scratch->update_type < UPDATE_TYPE_FULL);
- // TODO: Do we need this to be alive in execute?
- struct dc_fast_update fast_update[MAX_SURFACES] = { 0 };
-
- populate_fast_updates(
- fast_update,
- scratch->surface_updates,
- scratch->surface_count,
- scratch->stream_update
- );
- const bool fast = fast_update_only(
- scratch->dc,
- fast_update,
- scratch->surface_updates,
- scratch->surface_count,
- scratch->stream_update,
- scratch->stream
- )
- // TODO: Can this be used to skip `populate_fast_updates`?
- && !scratch->dc->check_config.enable_legacy_fast_update;
- scratch->flow = fast
+ scratch->flow = is_hwss_fast_path_only
? UPDATE_V3_FLOW_NO_NEW_CONTEXT_CONTEXT_FAST
: UPDATE_V3_FLOW_NO_NEW_CONTEXT_CONTEXT_FULL;
return true;
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
index c9fbb64d706a..8271b12c1a66 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
@@ -2431,7 +2431,6 @@ static void resource_log_pipe_for_stream(struct dc *dc, struct dc_state *state,
int slice_idx, dpp_idx, plane_idx, slice_count, dpp_count;
bool is_primary;
- DC_LOGGER_INIT(dc->ctx->logger);
slice_count = resource_get_opp_heads_for_otg_master(otg_master,
&state->res_ctx, opp_heads);
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
index baf820e6eae8..908f79b02102 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
@@ -33,6 +33,7 @@
#include "dc_dmub_srv.h"
#include "dc_state_priv.h"
#include "dc_stream_priv.h"
+#include "dce/dmub_hw_lock_mgr.h"
#define DC_LOGGER dc->ctx->logger
#ifndef MIN
@@ -171,10 +172,12 @@ struct dc_stream_state *dc_create_stream_for_sink(
goto fail;
stream = kzalloc_obj(struct dc_stream_state, GFP_ATOMIC);
+
if (stream == NULL)
goto fail;
stream->update_scratch = kzalloc((int32_t) dc_update_scratch_space_size(), GFP_ATOMIC);
+
if (stream->update_scratch == NULL)
goto fail;
@@ -245,7 +248,6 @@ const struct dc_stream_status *dc_stream_get_status_const(
const struct dc_stream_state *stream)
{
struct dc *dc = stream->ctx->dc;
-
return dc_state_get_stream_status(dc->current_state, stream);
}
@@ -257,6 +259,7 @@ void program_cursor_attributes(
struct resource_context *res_ctx;
struct pipe_ctx *pipe_to_program = NULL;
bool enable_cursor_offload = dc_dmub_srv_is_cursor_offload_enabled(dc);
+ bool unlock_dmub = false;
if (!stream)
return;
@@ -275,6 +278,12 @@ void program_cursor_attributes(
if (enable_cursor_offload && dc->hwss.begin_cursor_offload_update) {
dc->hwss.begin_cursor_offload_update(dc, pipe_ctx);
} else {
+ if (dc->hwss.dmub_hw_control_lock && pipe_ctx->stream &&
+ should_use_dmub_inbox0_lock_for_link(dc, pipe_ctx->stream->link)) {
+ dc->hwss.dmub_hw_control_lock(dc, dc->current_state, true);
+ unlock_dmub = true;
+ }
+
dc->hwss.cursor_lock(dc, pipe_to_program, true);
if (pipe_to_program->next_odm_pipe)
dc->hwss.cursor_lock(dc, pipe_to_program->next_odm_pipe, true);
@@ -297,6 +306,9 @@ void program_cursor_attributes(
dc->hwss.cursor_lock(dc, pipe_to_program, false);
if (pipe_to_program->next_odm_pipe)
dc->hwss.cursor_lock(dc, pipe_to_program->next_odm_pipe, false);
+
+ if (unlock_dmub)
+ dc->hwss.dmub_hw_control_lock(dc, dc->current_state, false);
}
}
}
@@ -404,6 +416,7 @@ void program_cursor_position(
struct resource_context *res_ctx;
struct pipe_ctx *pipe_to_program = NULL;
bool enable_cursor_offload = dc_dmub_srv_is_cursor_offload_enabled(dc);
+ bool unlock_dmub = false;
if (!stream)
return;
@@ -423,10 +436,16 @@ void program_cursor_position(
if (!pipe_to_program) {
pipe_to_program = pipe_ctx;
- if (enable_cursor_offload && dc->hwss.begin_cursor_offload_update)
+ if (enable_cursor_offload && dc->hwss.begin_cursor_offload_update) {
dc->hwss.begin_cursor_offload_update(dc, pipe_ctx);
- else
+ } else {
+ if (dc->hwss.dmub_hw_control_lock && pipe_ctx->stream &&
+ should_use_dmub_inbox0_lock_for_link(dc, pipe_ctx->stream->link)) {
+ dc->hwss.dmub_hw_control_lock(dc, dc->current_state, true);
+ unlock_dmub = true;
+ }
dc->hwss.cursor_lock(dc, pipe_to_program, true);
+ }
}
dc->hwss.set_cursor_position(pipe_ctx);
@@ -438,10 +457,14 @@ void program_cursor_position(
}
if (pipe_to_program) {
- if (enable_cursor_offload && dc->hwss.commit_cursor_offload_update)
+ if (enable_cursor_offload && dc->hwss.commit_cursor_offload_update) {
dc->hwss.commit_cursor_offload_update(dc, pipe_to_program);
- else
+ } else {
dc->hwss.cursor_lock(dc, pipe_to_program, false);
+
+ if (unlock_dmub)
+ dc->hwss.dmub_hw_control_lock(dc, dc->current_state, false);
+ }
}
}
@@ -523,8 +546,10 @@ bool dc_stream_program_cursor_position(
struct pipe_ctx *pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i];
/* trigger event on first pipe with current stream */
- if (stream == pipe_ctx->stream) {
- pipe_ctx->stream_res.tg->funcs->program_manual_trigger(pipe_ctx->stream_res.tg);
+ if (stream == pipe_ctx->stream &&
+ pipe_ctx->stream_res.tg->funcs->program_manual_trigger) {
+ pipe_ctx->stream_res.tg->funcs->program_manual_trigger(
+ pipe_ctx->stream_res.tg);
break;
}
}
@@ -984,7 +1009,6 @@ void dc_stream_release_3dlut_for_stream(
if (rmcm_3dlut) {
rmcm_3dlut->isInUse = false;
rmcm_3dlut->stream = NULL;
- rmcm_3dlut->protection_bits = 0;
}
}
@@ -996,7 +1020,6 @@ void dc_stream_init_rmcm_3dlut(struct dc *dc)
for (int i = 0; i < num_rmcm; i++) {
dc->res_pool->rmcm_3dlut[i].isInUse = false;
dc->res_pool->rmcm_3dlut[i].stream = NULL;
- dc->res_pool->rmcm_3dlut[i].protection_bits = 0;
}
}
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_surface.c b/drivers/gpu/drm/amd/display/dc/core/dc_surface.c
index d4c40b44d909..5f12dcca7f71 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_surface.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_surface.c
@@ -139,6 +139,9 @@ const struct dc_plane_status *dc_plane_get_status(
if (pipe_ctx->plane_state && flags.bits.address)
pipe_ctx->plane_state->status.is_flip_pending = false;
+ if (pipe_ctx->plane_state && flags.bits.histogram)
+ memset(&pipe_ctx->plane_state->status.cm_hist, 0,
+ sizeof(pipe_ctx->plane_state->status.cm_hist));
break;
}
@@ -154,6 +157,12 @@ const struct dc_plane_status *dc_plane_get_status(
if (flags.bits.address)
dc->hwss.update_pending_status(pipe_ctx);
+ if (flags.bits.histogram) {
+ struct dpp *dpp = pipe_ctx->plane_res.dpp;
+
+ if (dpp && dpp->funcs->dpp_cm_hist_read)
+ dpp->funcs->dpp_cm_hist_read(dpp, &pipe_ctx->plane_state->status.cm_hist);
+ }
}
return plane_status;
diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h
index 4bdb7bb47c75..c7a09724f569 100644
--- a/drivers/gpu/drm/amd/display/dc/dc.h
+++ b/drivers/gpu/drm/amd/display/dc/dc.h
@@ -63,7 +63,7 @@ struct dcn_dsc_reg_state;
struct dcn_optc_reg_state;
struct dcn_dccg_reg_state;
-#define DC_VER "3.2.372"
+#define DC_VER "3.2.373"
/**
* MAX_SURFACES - representative of the upper bound of surfaces that can be piped to a single CRTC
@@ -1404,15 +1404,50 @@ struct lut_mem_mapping {
struct dc_rmcm_3dlut {
bool isInUse;
const struct dc_stream_state *stream;
- uint8_t protection_bits;
};
struct dc_3dlut {
struct kref refcount;
struct tetrahedral_params lut_3d;
- struct fixed31_32 hdr_multiplier;
union dc_3dlut_state state;
};
+
+/* 3DLUT DMA (Fast Load) params */
+struct dc_3dlut_dma {
+ struct dc_plane_address addr;
+ enum dc_cm_lut_swizzle swizzle;
+ enum dc_cm_lut_pixel_format format;
+ uint16_t bias; /* FP1.5.10 */
+ uint16_t scale; /* FP1.5.10 */
+ enum dc_cm_lut_size size;
+};
+
+/* color manager */
+union dc_plane_cm_flags {
+ unsigned int all;
+ struct {
+ unsigned int shaper_enable : 1;
+ unsigned int lut3d_enable : 1;
+ unsigned int blend_enable : 1;
+ /* whether legacy (lut3d_func) or DMA is valid */
+ unsigned int lut3d_dma_enable : 1;
+ /* RMCM lut to be used instead of MCM */
+ unsigned int rmcm_enable : 1;
+ unsigned int reserved: 27;
+ } bits;
+};
+
+struct dc_plane_cm {
+ struct kref refcount;
+ struct dc_transfer_func shaper_func;
+ union {
+ struct dc_3dlut lut3d_func;
+ struct dc_3dlut_dma lut3d_dma;
+ };
+ struct dc_transfer_func blend_func;
+ union dc_plane_cm_flags flags;
+};
+
/*
* This structure is filled in by dc_surface_get_status and contains
* the last requested address and the currently active address so the called
@@ -1490,14 +1525,18 @@ struct dc_plane_state {
struct fixed31_32 hdr_mult;
struct colorspace_transform gamut_remap_matrix;
- // TODO: No longer used, remove
- struct dc_hdr_static_metadata hdr_static_ctx;
-
enum dc_color_space color_space;
+ bool lut_bank_a;
+ struct dc_hdr_static_metadata hdr_static_ctx;
struct dc_3dlut lut3d_func;
struct dc_transfer_func in_shaper_func;
struct dc_transfer_func blend_tf;
+ enum dc_cm2_shaper_3dlut_setting mcm_shaper_3dlut_setting;
+ bool mcm_lut1d_enable;
+ struct dc_cm2_func_luts mcm_luts;
+ enum mpcc_movable_cm_location mcm_location;
+ struct dc_plane_cm cm;
struct dc_transfer_func *gamcor_tf;
enum surface_pixel_format format;
@@ -1534,11 +1573,6 @@ struct dc_plane_state {
bool is_statically_allocated;
enum chroma_cositing cositing;
- enum dc_cm2_shaper_3dlut_setting mcm_shaper_3dlut_setting;
- bool mcm_lut1d_enable;
- struct dc_cm2_func_luts mcm_luts;
- bool lut_bank_a;
- enum mpcc_movable_cm_location mcm_location;
struct dc_csc_transform cursor_csc_color_matrix;
bool adaptive_sharpness_en;
int adaptive_sharpness_policy;
@@ -1884,6 +1918,7 @@ struct dc_surface_update {
* change cm2_params.cm2_luts: Fast update
*/
const struct dc_cm2_parameters *cm2_params;
+ const struct dc_plane_cm *cm;
const struct dc_csc_transform *cursor_csc_color_matrix;
unsigned int sdr_white_level_nits;
struct dc_bias_and_scale bias_and_scale;
@@ -1928,6 +1963,10 @@ struct dc_3dlut *dc_create_3dlut_func(void);
void dc_3dlut_func_release(struct dc_3dlut *lut);
void dc_3dlut_func_retain(struct dc_3dlut *lut);
+struct dc_plane_cm *dc_plane_cm_create(void);
+void dc_plane_cm_release(struct dc_plane_cm *cm);
+void dc_plane_cm_retain(struct dc_plane_cm *cm);
+
void dc_post_update_surfaces_to_stream(
struct dc *dc);
diff --git a/drivers/gpu/drm/amd/display/dc/dc_bios_types.h b/drivers/gpu/drm/amd/display/dc/dc_bios_types.h
index 526f71616f94..6f96c5cf39fe 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_bios_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_bios_types.h
@@ -102,8 +102,7 @@ struct dc_vbios_funcs {
struct bp_external_encoder_control *cntl);
enum bp_result (*dac_load_detection)(
struct dc_bios *bios,
- enum engine_id engine_id,
- struct graphics_object_id ext_enc_id);
+ enum engine_id engine_id);
enum bp_result (*transmitter_control)(
struct dc_bios *bios,
struct bp_transmitter_control *cntl);
diff --git a/drivers/gpu/drm/amd/display/dc/dc_dp_types.h b/drivers/gpu/drm/amd/display/dc/dc_dp_types.h
index 9540f0ead279..7fa336bf1115 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_dp_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_dp_types.h
@@ -1374,7 +1374,7 @@ union dpcd_replay_configuration {
unsigned char DESYNC_ERROR_STATUS : 1;
unsigned char SINK_DEVICE_REPLAY_STATUS : 3;
unsigned char SINK_FRAME_LOCKED : 2;
- unsigned char RESERVED : 1;
+ unsigned char FRAME_SKIPPING_ERROR_STATUS : 1;
} bits;
unsigned char raw;
};
diff --git a/drivers/gpu/drm/amd/display/dc/dc_plane.h b/drivers/gpu/drm/amd/display/dc/dc_plane.h
index 14feb843e694..2ad6d9318566 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_plane.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_plane.h
@@ -31,6 +31,7 @@
union dc_plane_status_update_flags {
struct {
uint32_t address : 1;
+ uint32_t histogram : 1;
} bits;
uint32_t raw;
};
diff --git a/drivers/gpu/drm/amd/display/dc/dc_spl_translate.c b/drivers/gpu/drm/amd/display/dc/dc_spl_translate.c
index 37d1a79e8241..ba7bf23f2b2f 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_spl_translate.c
+++ b/drivers/gpu/drm/amd/display/dc/dc_spl_translate.c
@@ -89,6 +89,7 @@ void translate_SPL_in_params_from_pipe_ctx(struct pipe_ctx *pipe_ctx, struct spl
spl_in->callbacks = dcn32_spl_callbacks;
break;
case DCN_VERSION_4_01:
+ case DCN_VERSION_4_2:
spl_in->callbacks = dcn401_spl_callbacks;
break;
default:
diff --git a/drivers/gpu/drm/amd/display/dc/dc_trace.h b/drivers/gpu/drm/amd/display/dc/dc_trace.h
index bbec308a3a5e..b7a011646d53 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_trace.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_trace.h
@@ -23,8 +23,8 @@
#include "amdgpu_dm_trace.h"
-#define TRACE_DC_PIPE_STATE(pipe_ctx, index, max_pipes) \
- for (index = 0; index < max_pipes; ++index) { \
+#define TRACE_DC_PIPE_STATE(pipe_ctx, max_pipes) \
+ for (int index = 0; index < max_pipes; ++index) { \
struct pipe_ctx *pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[index]; \
if (pipe_ctx->plane_state) \
trace_amdgpu_dm_dc_pipe_state(pipe_ctx->pipe_idx, pipe_ctx->plane_state, \
diff --git a/drivers/gpu/drm/amd/display/dc/dc_types.h b/drivers/gpu/drm/amd/display/dc/dc_types.h
index d2e60480fb2b..e224077c8902 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_types.h
@@ -1194,6 +1194,8 @@ struct replay_config {
union replay_optimization replay_optimization;
/* Replay sub feature Frame Skipping is supported */
bool frame_skip_supported;
+ /* Replay Received Frame Skipping Error HPD. */
+ bool received_frame_skipping_error_hpd;
};
/* Replay feature flags*/
@@ -1481,4 +1483,28 @@ struct dc_validation_dpia_set {
uint32_t required_bw;
};
+enum dc_cm_lut_swizzle {
+ CM_LUT_3D_SWIZZLE_LINEAR_RGB,
+ CM_LUT_3D_SWIZZLE_LINEAR_BGR,
+ CM_LUT_1D_PACKED_LINEAR
+};
+
+enum dc_cm_lut_pixel_format {
+ CM_LUT_PIXEL_FORMAT_RGBA16161616_UNORM_12MSB,
+ CM_LUT_PIXEL_FORMAT_BGRA16161616_UNORM_12MSB,
+ CM_LUT_PIXEL_FORMAT_RGBA16161616_UNORM_12LSB,
+ CM_LUT_PIXEL_FORMAT_BGRA16161616_UNORM_12LSB,
+ CM_LUT_PIXEL_FORMAT_RGBA16161616_FLOAT_FP1_5_10,
+ CM_LUT_PIXEL_FORMAT_BGRA16161616_FLOAT_FP1_5_10
+};
+
+enum dc_cm_lut_size {
+ CM_LUT_SIZE_NONE,
+ CM_LUT_SIZE_999,
+ CM_LUT_SIZE_171717,
+ CM_LUT_SIZE_333333,
+ CM_LUT_SIZE_454545,
+ CM_LUT_SIZE_656565,
+};
+
#endif /* DC_TYPES_H_ */
diff --git a/drivers/gpu/drm/amd/display/dc/dccg/dcn20/dcn20_dccg.h b/drivers/gpu/drm/amd/display/dc/dccg/dcn20/dcn20_dccg.h
index ffcd2e139e76..088cf305a772 100644
--- a/drivers/gpu/drm/amd/display/dc/dccg/dcn20/dcn20_dccg.h
+++ b/drivers/gpu/drm/amd/display/dc/dccg/dcn20/dcn20_dccg.h
@@ -38,7 +38,11 @@
DCCG_SRII(PIXEL_RATE_CNTL, OTG, 0),\
DCCG_SRII(PIXEL_RATE_CNTL, OTG, 1),\
SR(DISPCLK_FREQ_CHANGE_CNTL),\
- SR(DC_MEM_GLOBAL_PWR_REQ_CNTL)
+ SR(DC_MEM_GLOBAL_PWR_REQ_CNTL),\
+ SR(MICROSECOND_TIME_BASE_DIV),\
+ SR(MILLISECOND_TIME_BASE_DIV),\
+ SR(DCCG_GATE_DISABLE_CNTL),\
+ SR(DCCG_GATE_DISABLE_CNTL2)
#define DCCG_REG_LIST_DCN2() \
DCCG_COMMON_REG_LIST_DCN_BASE(),\
@@ -370,7 +374,8 @@
type OTG1_DROP_PIXEL;\
type OTG2_DROP_PIXEL;\
type OTG3_ADD_PIXEL;\
- type OTG3_DROP_PIXEL;
+ type OTG3_DROP_PIXEL;\
+ type RESYNC_FIFO_LEVEL_ADJUST_EN;
struct dccg_shift {
DCCG_REG_FIELD_LIST(uint8_t)
diff --git a/drivers/gpu/drm/amd/display/dc/dccg/dcn21/dcn21_dccg.c b/drivers/gpu/drm/amd/display/dc/dccg/dcn21/dcn21_dccg.c
index 75c69348027e..c4d4eea140f3 100644
--- a/drivers/gpu/drm/amd/display/dc/dccg/dcn21/dcn21_dccg.c
+++ b/drivers/gpu/drm/amd/display/dc/dccg/dcn21/dcn21_dccg.c
@@ -96,6 +96,25 @@ static void dccg21_update_dpp_dto(struct dccg *dccg, int dpp_inst, int req_dppcl
dccg->pipe_dppclk_khz[dpp_inst] = req_dppclk;
}
+/*
+ * On DCN21 S0i3 resume, BIOS programs MICROSECOND_TIME_BASE_DIV to
+ * 0x00120464 as a marker that golden init has already been done.
+ * dcn21_s0i3_golden_init_wa() reads this marker later in bios_golden_init()
+ * to decide whether to skip golden init.
+ *
+ * dccg2_init() unconditionally overwrites MICROSECOND_TIME_BASE_DIV to
+ * 0x00120264, destroying the marker before it can be read.
+ *
+ * Guard the call: if the S0i3 marker is present, skip dccg2_init() so the
+ * WA can function correctly. bios_golden_init() will handle init in that case.
+ */
+static void dccg21_init(struct dccg *dccg)
+{
+ if (dccg2_is_s0i3_golden_init_wa_done(dccg))
+ return;
+
+ dccg2_init(dccg);
+}
static const struct dccg_funcs dccg21_funcs = {
.update_dpp_dto = dccg21_update_dpp_dto,
@@ -103,7 +122,7 @@ static const struct dccg_funcs dccg21_funcs = {
.set_fifo_errdet_ovr_en = dccg2_set_fifo_errdet_ovr_en,
.otg_add_pixel = dccg2_otg_add_pixel,
.otg_drop_pixel = dccg2_otg_drop_pixel,
- .dccg_init = dccg2_init,
+ .dccg_init = dccg21_init,
.refclk_setup = dccg2_refclk_setup, /* Deprecated - for backward compatibility only */
.allow_clock_gating = dccg2_allow_clock_gating,
.enable_memory_low_power = dccg2_enable_memory_low_power,
diff --git a/drivers/gpu/drm/amd/display/dc/dccg/dcn301/dcn301_dccg.h b/drivers/gpu/drm/amd/display/dc/dccg/dcn301/dcn301_dccg.h
index 067e49cb238e..e2381ca0be0b 100644
--- a/drivers/gpu/drm/amd/display/dc/dccg/dcn301/dcn301_dccg.h
+++ b/drivers/gpu/drm/amd/display/dc/dccg/dcn301/dcn301_dccg.h
@@ -34,7 +34,13 @@
DCCG_SRII(DTO_PARAM, DPPCLK, 1),\
DCCG_SRII(DTO_PARAM, DPPCLK, 2),\
DCCG_SRII(DTO_PARAM, DPPCLK, 3),\
- SR(REFCLK_CNTL)
+ SR(REFCLK_CNTL),\
+ SR(DISPCLK_FREQ_CHANGE_CNTL),\
+ SR(DC_MEM_GLOBAL_PWR_REQ_CNTL),\
+ SR(MICROSECOND_TIME_BASE_DIV),\
+ SR(MILLISECOND_TIME_BASE_DIV),\
+ SR(DCCG_GATE_DISABLE_CNTL),\
+ SR(DCCG_GATE_DISABLE_CNTL2)
#define DCCG_MASK_SH_LIST_DCN301(mask_sh) \
DCCG_SFI(DPPCLK_DTO_CTRL, DTO_ENABLE, DPPCLK, 0, mask_sh),\
diff --git a/drivers/gpu/drm/amd/display/dc/dccg/dcn31/dcn31_dccg.h b/drivers/gpu/drm/amd/display/dc/dccg/dcn31/dcn31_dccg.h
index bf659920d4cc..b5e3849ef12a 100644
--- a/drivers/gpu/drm/amd/display/dc/dccg/dcn31/dcn31_dccg.h
+++ b/drivers/gpu/drm/amd/display/dc/dccg/dcn31/dcn31_dccg.h
@@ -64,9 +64,12 @@
SR(DSCCLK1_DTO_PARAM),\
SR(DSCCLK2_DTO_PARAM),\
SR(DSCCLK_DTO_CTRL),\
+ SR(DCCG_GATE_DISABLE_CNTL),\
SR(DCCG_GATE_DISABLE_CNTL2),\
SR(DCCG_GATE_DISABLE_CNTL3),\
- SR(HDMISTREAMCLK0_DTO_PARAM)
+ SR(HDMISTREAMCLK0_DTO_PARAM),\
+ SR(DC_MEM_GLOBAL_PWR_REQ_CNTL),\
+ SR(MICROSECOND_TIME_BASE_DIV)
#define DCCG_MASK_SH_LIST_DCN31(mask_sh) \
diff --git a/drivers/gpu/drm/amd/display/dc/dccg/dcn314/dcn314_dccg.h b/drivers/gpu/drm/amd/display/dc/dccg/dcn314/dcn314_dccg.h
index a609635f35db..ecbdc05f7c45 100644
--- a/drivers/gpu/drm/amd/display/dc/dccg/dcn314/dcn314_dccg.h
+++ b/drivers/gpu/drm/amd/display/dc/dccg/dcn314/dcn314_dccg.h
@@ -70,11 +70,14 @@
SR(DSCCLK2_DTO_PARAM),\
SR(DSCCLK3_DTO_PARAM),\
SR(DSCCLK_DTO_CTRL),\
+ SR(DCCG_GATE_DISABLE_CNTL),\
SR(DCCG_GATE_DISABLE_CNTL2),\
SR(DCCG_GATE_DISABLE_CNTL3),\
SR(HDMISTREAMCLK0_DTO_PARAM),\
SR(OTG_PIXEL_RATE_DIV),\
- SR(DTBCLK_P_CNTL)
+ SR(DTBCLK_P_CNTL),\
+ SR(DC_MEM_GLOBAL_PWR_REQ_CNTL),\
+ SR(MICROSECOND_TIME_BASE_DIV)
#define DCCG_MASK_SH_LIST_DCN314_COMMON(mask_sh) \
DCCG_SFI(DPPCLK_DTO_CTRL, DTO_DB_EN, DPPCLK, 0, mask_sh),\
diff --git a/drivers/gpu/drm/amd/display/dc/dccg/dcn401/dcn401_dccg.c b/drivers/gpu/drm/amd/display/dc/dccg/dcn401/dcn401_dccg.c
index a37f94dec6f2..4b9a14c679d3 100644
--- a/drivers/gpu/drm/amd/display/dc/dccg/dcn401/dcn401_dccg.c
+++ b/drivers/gpu/drm/amd/display/dc/dccg/dcn401/dcn401_dccg.c
@@ -827,6 +827,16 @@ void dccg401_enable_symclk_se(struct dccg *dccg, uint32_t stream_enc_inst, uint3
if (dccg->ctx->dc->debug.root_clock_optimization.bits.symclk32_se)
REG_UPDATE(DCCG_GATE_DISABLE_CNTL5, SYMCLKD_FE_ROOT_GATE_DISABLE, 1);
break;
+ case 4:
+ if (dccg_dcn->dccg_mask->SYMCLKE_FE_ROOT_GATE_DISABLE) {
+ REG_UPDATE_2(SYMCLKE_CLOCK_ENABLE,
+ SYMCLKE_FE_EN, 1,
+ SYMCLKE_FE_SRC_SEL, link_enc_inst);
+ REG_UPDATE(DCCG_GATE_DISABLE_CNTL5, SYMCLKE_FE_ROOT_GATE_DISABLE, 1);
+ }
+ break;
+ default:
+ return;
}
}
@@ -855,6 +865,16 @@ void dccg401_disable_symclk_se(struct dccg *dccg, uint32_t stream_enc_inst, uint
SYMCLKD_FE_EN, 0,
SYMCLKD_FE_SRC_SEL, 0);
break;
+ case 4:
+ if (dccg_dcn->dccg_mask->SYMCLKE_FE_ROOT_GATE_DISABLE) {
+ REG_UPDATE(DCCG_GATE_DISABLE_CNTL5, SYMCLKE_FE_ROOT_GATE_DISABLE, 0);
+ REG_UPDATE_2(SYMCLKE_CLOCK_ENABLE,
+ SYMCLKE_FE_EN, 0,
+ SYMCLKE_FE_SRC_SEL, 0);
+ }
+ break;
+ default:
+ return;
}
}
diff --git a/drivers/gpu/drm/amd/display/dc/dccg/dcn42/dcn42_dccg.c b/drivers/gpu/drm/amd/display/dc/dccg/dcn42/dcn42_dccg.c
index d1593dc68e36..19dfc3fe5c3a 100644
--- a/drivers/gpu/drm/amd/display/dc/dccg/dcn42/dcn42_dccg.c
+++ b/drivers/gpu/drm/amd/display/dc/dccg/dcn42/dcn42_dccg.c
@@ -180,6 +180,61 @@ void dccg42_set_physymclk(
}
}
+void dccg42_set_pixel_rate_div(
+ struct dccg *dccg,
+ uint32_t otg_inst,
+ enum pixel_rate_div tmds_div,
+ enum pixel_rate_div unused)
+{
+ struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg);
+ uint32_t cur_tmds_div = PIXEL_RATE_DIV_NA;
+ uint32_t dp_dto_int;
+ uint32_t reg_val;
+
+ // only 2 and 4 are valid on dcn42
+ if (tmds_div != PIXEL_RATE_DIV_BY_2 && tmds_div != PIXEL_RATE_DIV_BY_4) {
+ return;
+ }
+
+ dccg401_get_pixel_rate_div(dccg, otg_inst, &cur_tmds_div, &dp_dto_int);
+ if (tmds_div == cur_tmds_div)
+ return;
+
+ // encode enum to register value
+ reg_val = tmds_div == PIXEL_RATE_DIV_BY_4 ? 1 : 0;
+
+ switch (otg_inst) {
+ case 0:
+ REG_UPDATE(OTG_PIXEL_RATE_DIV,
+ OTG0_TMDS_PIXEL_RATE_DIV, reg_val);
+ break;
+ case 1:
+ REG_UPDATE(OTG_PIXEL_RATE_DIV,
+ OTG1_TMDS_PIXEL_RATE_DIV, reg_val);
+ break;
+ case 2:
+ REG_UPDATE(OTG_PIXEL_RATE_DIV,
+ OTG2_TMDS_PIXEL_RATE_DIV, reg_val);
+ break;
+ case 3:
+ REG_UPDATE(OTG_PIXEL_RATE_DIV,
+ OTG3_TMDS_PIXEL_RATE_DIV, reg_val);
+ break;
+ default:
+ BREAK_TO_DEBUGGER();
+ return;
+ }
+}
+
+void dccg42_trigger_dio_fifo_resync(struct dccg *dccg)
+{
+ struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg);
+
+ REG_UPDATE(DISPCLK_FREQ_CHANGE_CNTL, RESYNC_FIFO_LEVEL_ADJUST_EN, 1);
+ REG_UPDATE(DISPCLK_FREQ_CHANGE_CNTL, RESYNC_FIFO_LEVEL_ADJUST_EN, 0);
+ REG_WAIT(DISPCLK_FREQ_CHANGE_CNTL, DISPCLK_FREQ_RAMP_DONE, 1, 50, 2000);
+}
+
static void dccg42_init(struct dccg *dccg)
{
int otg_inst;
@@ -240,9 +295,9 @@ static const struct dccg_funcs dccg42_funcs = {
.otg_drop_pixel = dccg42_otg_drop_pixel,
.disable_dsc = dccg35_disable_dscclk,
.enable_dsc = dccg35_enable_dscclk,
- .set_pixel_rate_div = dccg401_set_pixel_rate_div,
+ .set_pixel_rate_div = dccg42_set_pixel_rate_div,
.get_pixel_rate_div = dccg401_get_pixel_rate_div,
- .trigger_dio_fifo_resync = dccg35_trigger_dio_fifo_resync,
+ .trigger_dio_fifo_resync = dccg42_trigger_dio_fifo_resync,
.set_dp_dto = dccg401_set_dp_dto,
.enable_symclk_se = dccg35_enable_symclk_se,
.disable_symclk_se = dccg35_disable_symclk_se,
diff --git a/drivers/gpu/drm/amd/display/dc/dccg/dcn42/dcn42_dccg.h b/drivers/gpu/drm/amd/display/dc/dccg/dcn42/dcn42_dccg.h
index 96eae0003f43..d9831b0f8235 100644
--- a/drivers/gpu/drm/amd/display/dc/dccg/dcn42/dcn42_dccg.h
+++ b/drivers/gpu/drm/amd/display/dc/dccg/dcn42/dcn42_dccg.h
@@ -238,7 +238,8 @@
DCCG_SF(SYMCLKE_CLOCK_ENABLE, SYMCLKE_SRC_SEL, mask_sh),\
DCCG_SF(SYMCLKE_CLOCK_ENABLE, SYMCLKE_CLOCK_ENABLE, mask_sh),\
DCCG_SF(SYMCLKE_CLOCK_ENABLE, SYMCLKE_FE_EN, mask_sh),\
- DCCG_SF(SYMCLKE_CLOCK_ENABLE, SYMCLKE_FE_SRC_SEL, mask_sh)
+ DCCG_SF(SYMCLKE_CLOCK_ENABLE, SYMCLKE_FE_SRC_SEL, mask_sh),\
+ DCCG_SF(DISPCLK_FREQ_CHANGE_CNTL, RESYNC_FIFO_LEVEL_ADJUST_EN, mask_sh)
void dccg42_otg_add_pixel(struct dccg *dccg,
@@ -254,6 +255,14 @@ void dccg42_set_physymclk(
enum physymclk_clock_source clk_src,
bool force_enable);
+void dccg42_set_pixel_rate_div(
+ struct dccg *dccg,
+ uint32_t otg_inst,
+ enum pixel_rate_div tmds_div,
+ enum pixel_rate_div unused);
+
+void dccg42_trigger_dio_fifo_resync(struct dccg *dccg);
+
struct dccg *dccg42_create(
struct dc_context *ctx,
const struct dccg_registers *regs,
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dmub_hw_lock_mgr.c b/drivers/gpu/drm/amd/display/dc/dce/dmub_hw_lock_mgr.c
index 7116fdd4c7ec..d0ffa99f1fe0 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dmub_hw_lock_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dmub_hw_lock_mgr.c
@@ -28,6 +28,14 @@
#include "dc_types.h"
#include "core_types.h"
+static bool dmub_hw_lock_has_inbox0_lock(const struct dc *dc)
+{
+ return dc->ctx && dc->ctx->dmub_srv &&
+ dc->hwss.dmub_hw_control_lock &&
+ dc->hwss.dmub_hw_control_lock_fast &&
+ dc->ctx->dmub_srv->dmub->meta_info.feature_bits.bits.inbox0_lock_support;
+}
+
void dmub_hw_lock_mgr_cmd(struct dc_dmub_srv *dmub_srv,
bool lock,
union dmub_hw_lock_flags *hw_locks,
@@ -105,5 +113,13 @@ bool should_use_dmub_inbox1_lock(const struct dc *dc, const struct dc_link *link
if (dc->ctx->dce_version >= DCN_VERSION_4_01)
return false;
+ if (dmub_hw_lock_has_inbox0_lock(dc))
+ return false;
+
return dmub_hw_lock_mgr_does_link_require_lock(dc, link);
}
+
+bool should_use_dmub_inbox0_lock_for_link(const struct dc *dc, const struct dc_link *link)
+{
+ return dmub_hw_lock_has_inbox0_lock(dc) && dmub_hw_lock_mgr_does_link_require_lock(dc, link);
+}
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dmub_hw_lock_mgr.h b/drivers/gpu/drm/amd/display/dc/dce/dmub_hw_lock_mgr.h
index 4c80ca8484ad..3e8caa930390 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dmub_hw_lock_mgr.h
+++ b/drivers/gpu/drm/amd/display/dc/dce/dmub_hw_lock_mgr.h
@@ -46,7 +46,38 @@ void dmub_hw_lock_mgr_inbox0_cmd(struct dc_dmub_srv *dmub_srv,
* Return: true if the inbox1 lock should be used, false otherwise
*/
bool should_use_dmub_inbox1_lock(const struct dc *dc, const struct dc_link *link);
+
+/**
+ * dmub_hw_lock_mgr_does_link_require_lock() - Returns true if the link has a feature that needs the HW lock.
+ *
+ * @dc: Pointer to DC object
+ * @link: The link to check
+ *
+ * Return: true if the link has a feature that needs the HW lock, false otherwise
+ */
bool dmub_hw_lock_mgr_does_link_require_lock(const struct dc *dc, const struct dc_link *link);
+
+/**
+ * dmub_hw_lock_mgr_does_context_require_lock() - Returns true if the context has any stream that needs the HW lock.
+ *
+ * @dc: Pointer to DC object
+ * @context: The context to check
+ *
+ * Return: true if the context has any stream that needs the HW lock, false otherwise
+ */
bool dmub_hw_lock_mgr_does_context_require_lock(const struct dc *dc, const struct dc_state *context);
+/**
+ * should_use_dmub_inbox0_lock_for_link() - Checks if the inbox0 interlock with DMU should be used.
+ *
+ * Is not functionally equivalent to inbox1 as DMUB will not own programming of the relevant locking
+ * registers.
+ *
+ * @dc: pointer to DC object
+ * @link: optional pointer to the link object to check for enabled link features
+ *
+ * Return: true if the inbox0 lock should be used, false otherwise
+ */
+bool should_use_dmub_inbox0_lock_for_link(const struct dc *dc, const struct dc_link *link);
+
#endif /*_DMUB_HW_LOCK_MGR_H_ */
diff --git a/drivers/gpu/drm/amd/display/dc/dml2_0/Makefile b/drivers/gpu/drm/amd/display/dc/dml2_0/Makefile
index 30cfc0848792..70d9f2cd0b60 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2_0/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/dml2_0/Makefile
@@ -90,6 +90,7 @@ CFLAGS_$(AMDDALPATH)/dc/dml2_0/dml21/src/dml2_core/dml2_core_factory.o := $(dml2
CFLAGS_$(AMDDALPATH)/dc/dml2_0/dml21/src/dml2_dpmm/dml2_dpmm_dcn4.o := $(dml2_ccflags)
CFLAGS_$(AMDDALPATH)/dc/dml2_0/dml21/src/dml2_dpmm/dml2_dpmm_factory.o := $(dml2_ccflags)
CFLAGS_$(AMDDALPATH)/dc/dml2_0/dml21/src/dml2_mcg/dml2_mcg_dcn4.o := $(dml2_ccflags)
+CFLAGS_$(AMDDALPATH)/dc/dml2_0/dml21/src/dml2_mcg/dml2_mcg_dcn42.o := $(dml2_ccflags)
CFLAGS_$(AMDDALPATH)/dc/dml2_0/dml21/src/dml2_mcg/dml2_mcg_factory.o := $(dml2_ccflags)
CFLAGS_$(AMDDALPATH)/dc/dml2_0/dml21/src/dml2_pmo/dml2_pmo_dcn3.o := $(dml2_ccflags)
CFLAGS_$(AMDDALPATH)/dc/dml2_0/dml21/src/dml2_pmo/dml2_pmo_dcn4_fams2.o := $(dml2_ccflags)
@@ -107,6 +108,7 @@ CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2_0/dml21/src/dml2_top/dml2_top_soc15.o := $(d
CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2_0/dml21/src/dml2_dpmm/dml2_dpmm_dcn4.o := $(dml2_rcflags)
CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2_0/dml21/src/dml2_dpmm/dml2_dpmm_factory.o := $(dml2_rcflags)
CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2_0/dml21/src/dml2_mcg/dml2_mcg_dcn4.o := $(dml2_rcflags)
+CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2_0/dml21/src/dml2_mcg/dml2_mcg_dcn42.o := $(dml2_rcflags)
CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2_0/dml21/src/dml2_mcg/dml2_mcg_factory.o := $(dml2_rcflags)
CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2_0/dml21/src/dml2_pmo/dml2_pmo_dcn3.o := $(dml2_rcflags)
CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml2_0/dml21/src/dml2_pmo/dml2_pmo_dcn4_fams2.o := $(dml2_rcflags)
@@ -124,6 +126,7 @@ DML21 += src/dml2_core/dml2_core_dcn4_calcs.o
DML21 += src/dml2_dpmm/dml2_dpmm_dcn4.o
DML21 += src/dml2_dpmm/dml2_dpmm_factory.o
DML21 += src/dml2_mcg/dml2_mcg_dcn4.o
+DML21 += src/dml2_mcg/dml2_mcg_dcn42.o
DML21 += src/dml2_mcg/dml2_mcg_factory.o
DML21 += src/dml2_pmo/dml2_pmo_dcn3.o
DML21 += src/dml2_pmo/dml2_pmo_factory.o
diff --git a/drivers/gpu/drm/amd/display/dc/dml2_0/display_mode_core.c b/drivers/gpu/drm/amd/display/dc/dml2_0/display_mode_core.c
index 09303c282495..8e8935995fca 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2_0/display_mode_core.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2_0/display_mode_core.c
@@ -4089,8 +4089,8 @@ static void CalculateSwathAndDETConfiguration(struct display_mode_lib_scratch_st
dml_uint_t MaximumSwathHeightC[__DML_NUM_PLANES__];
dml_uint_t RoundedUpMaxSwathSizeBytesY[__DML_NUM_PLANES__];
dml_uint_t RoundedUpMaxSwathSizeBytesC[__DML_NUM_PLANES__];
- dml_uint_t RoundedUpSwathSizeBytesY[__DML_NUM_PLANES__];
- dml_uint_t RoundedUpSwathSizeBytesC[__DML_NUM_PLANES__];
+ dml_uint_t RoundedUpSwathSizeBytesY[__DML_NUM_PLANES__] = { 0 };
+ dml_uint_t RoundedUpSwathSizeBytesC[__DML_NUM_PLANES__] = { 0 };
dml_uint_t SwathWidthSingleDPP[__DML_NUM_PLANES__];
dml_uint_t SwathWidthSingleDPPChroma[__DML_NUM_PLANES__];
diff --git a/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/dml21_translation_helper.c b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/dml21_translation_helper.c
index 75a279997961..847fab508750 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/dml21_translation_helper.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/dml21_translation_helper.c
@@ -45,6 +45,9 @@ static enum dml2_project_id dml21_dcn_revision_to_dml2_project_id(enum dce_versi
case DCN_VERSION_4_01:
project_id = dml2_project_dcn4x_stage2_auto_drr_svp;
break;
+ case DCN_VERSION_4_2:
+ project_id = dml2_project_dcn42;
+ break;
default:
project_id = dml2_project_invalid;
DC_ERR("unsupported dcn version for DML21!");
@@ -598,29 +601,31 @@ static void populate_dml21_plane_config_from_plane_state(struct dml2_context *dm
plane->composition.viewport.stationary = false;
- if (plane_state->mcm_luts.lut3d_data.lut3d_src == DC_CM2_TRANSFER_FUNC_SOURCE_VIDMEM) {
+ if (plane_state->cm.flags.bits.lut3d_dma_enable) {
plane->tdlut.setup_for_tdlut = true;
- switch (plane_state->mcm_luts.lut3d_data.gpu_mem_params.layout) {
- case DC_CM2_GPU_MEM_LAYOUT_3D_SWIZZLE_LINEAR_RGB:
- case DC_CM2_GPU_MEM_LAYOUT_3D_SWIZZLE_LINEAR_BGR:
+ switch (plane_state->cm.lut3d_dma.swizzle) {
+ case CM_LUT_3D_SWIZZLE_LINEAR_RGB:
+ case CM_LUT_3D_SWIZZLE_LINEAR_BGR:
plane->tdlut.tdlut_addressing_mode = dml2_tdlut_sw_linear;
break;
- case DC_CM2_GPU_MEM_LAYOUT_1D_PACKED_LINEAR:
+ case CM_LUT_1D_PACKED_LINEAR:
+ default:
plane->tdlut.tdlut_addressing_mode = dml2_tdlut_simple_linear;
break;
}
- switch (plane_state->mcm_luts.lut3d_data.gpu_mem_params.size) {
- case DC_CM2_GPU_MEM_SIZE_171717:
- plane->tdlut.tdlut_width_mode = dml2_tdlut_width_17_cube;
+ switch (plane_state->cm.lut3d_dma.size) {
+ case CM_LUT_SIZE_333333:
+ plane->tdlut.tdlut_width_mode = dml2_tdlut_width_33_cube;
break;
- case DC_CM2_GPU_MEM_SIZE_TRANSFORMED:
+ case CM_LUT_SIZE_171717:
default:
- //plane->tdlut.tdlut_width_mode = dml2_tdlut_width_flatten; // dml2_tdlut_width_flatten undefined
+ plane->tdlut.tdlut_width_mode = dml2_tdlut_width_17_cube;
break;
}
}
+
plane->tdlut.setup_for_tdlut |= dml_ctx->config.force_tdlut_enable;
plane->dynamic_meta_data.enable = false;
@@ -824,6 +829,9 @@ void dml21_copy_clocks_to_dc_state(struct dml2_context *in_ctx, struct dc_state
context->bw_ctx.bw.dcn.clk.subvp_prefetch_fclk_khz = in_ctx->v21.mode_programming.programming->min_clocks.dcn4x.svp_prefetch_no_throttle.fclk_khz;
context->bw_ctx.bw.dcn.clk.stutter_efficiency.base_efficiency = in_ctx->v21.mode_programming.programming->stutter.base_percent_efficiency;
context->bw_ctx.bw.dcn.clk.stutter_efficiency.low_power_efficiency = in_ctx->v21.mode_programming.programming->stutter.low_power_percent_efficiency;
+ context->bw_ctx.bw.dcn.clk.stutter_efficiency.z8_stutter_efficiency = in_ctx->v21.mode_programming.programming->informative.power_management.z8.stutter_efficiency;
+ context->bw_ctx.bw.dcn.clk.stutter_efficiency.z8_stutter_period = in_ctx->v21.mode_programming.programming->informative.power_management.z8.stutter_period;
+ context->bw_ctx.bw.dcn.clk.zstate_support = in_ctx->v21.mode_programming.programming->z8_stutter.supported_in_blank; /*ignore meets_eco since it is not used*/
}
static struct dml2_dchub_watermark_regs *wm_set_index_to_dc_wm_set(union dcn_watermark_set *watermarks, const enum dml2_dchub_watermark_reg_set_index wm_index)
@@ -931,3 +939,31 @@ void dml21_set_dc_p_state_type(
}
}
+void dml21_init_min_clocks_for_dc_state(struct dml2_context *in_ctx, struct dc_state *context)
+{
+ unsigned int lowest_dpm_state_index = 0;
+ struct dc_clocks *min_clocks = &context->bw_ctx.bw.dcn.clk;
+
+ min_clocks->dispclk_khz = in_ctx->v21.dml_init.soc_bb.clk_table.dispclk.clk_values_khz[lowest_dpm_state_index];
+ min_clocks->dppclk_khz = in_ctx->v21.dml_init.soc_bb.clk_table.dppclk.clk_values_khz[lowest_dpm_state_index];
+ min_clocks->dcfclk_khz = in_ctx->v21.dml_init.soc_bb.clk_table.dcfclk.clk_values_khz[lowest_dpm_state_index];
+ min_clocks->dramclk_khz = in_ctx->v21.dml_init.soc_bb.clk_table.uclk.clk_values_khz[lowest_dpm_state_index];
+ min_clocks->fclk_khz = in_ctx->v21.dml_init.soc_bb.clk_table.fclk.clk_values_khz[lowest_dpm_state_index];
+ min_clocks->idle_dramclk_khz = 0;
+ min_clocks->idle_fclk_khz = 0;
+ min_clocks->dcfclk_deep_sleep_khz = 0;
+ min_clocks->fclk_p_state_change_support = true;
+ min_clocks->p_state_change_support = true;
+ min_clocks->dtbclk_en = false;
+ min_clocks->ref_dtbclk_khz = 0;
+ min_clocks->socclk_khz = in_ctx->v21.dml_init.soc_bb.clk_table.socclk.clk_values_khz[lowest_dpm_state_index];
+ min_clocks->subvp_prefetch_dramclk_khz = 0;
+ min_clocks->subvp_prefetch_fclk_khz = 0;
+ min_clocks->phyclk_khz = in_ctx->v21.dml_init.soc_bb.clk_table.phyclk.clk_values_khz[lowest_dpm_state_index];
+ min_clocks->stutter_efficiency.base_efficiency = 1;
+ min_clocks->stutter_efficiency.low_power_efficiency = 1;
+ min_clocks->stutter_efficiency.z8_stutter_efficiency = 1;
+ min_clocks->stutter_efficiency.z8_stutter_period = 100000;
+ min_clocks->zstate_support = DCN_ZSTATE_SUPPORT_ALLOW;
+}
+
diff --git a/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/dml21_translation_helper.h b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/dml21_translation_helper.h
index 9880d3e0398e..f51d3d8a52c3 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/dml21_translation_helper.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/dml21_translation_helper.h
@@ -25,4 +25,5 @@ void dml21_map_hw_resources(struct dml2_context *dml_ctx);
void dml21_get_pipe_mcache_config(struct dc_state *context, struct pipe_ctx *pipe_ctx, struct dml2_per_plane_programming *pln_prog, struct dml2_pipe_configuration_descriptor *mcache_pipe_config);
void dml21_set_dc_p_state_type(struct pipe_ctx *pipe_ctx, struct dml2_per_stream_programming *stream_programming, bool sub_vp_enabled);
unsigned int map_plane_to_dml21_display_cfg(const struct dml2_context *dml_ctx, unsigned int stream_id, const struct dc_plane_state *plane, const struct dc_state *context);
+void dml21_init_min_clocks_for_dc_state(struct dml2_context *in_ctx, struct dc_state *context);
#endif
diff --git a/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/dml21_utils.c b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/dml21_utils.c
index 732b994b8864..ab7ec24268be 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/dml21_utils.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/dml21_utils.c
@@ -374,6 +374,7 @@ void dml21_handle_phantom_streams_planes(const struct dc *dc, struct dc_state *c
dml2_map_dc_pipes(dml_ctx, context, NULL, &dml_ctx->v21.dml_to_dc_pipe_mapping, dc->current_state);
}
+
static unsigned int dml21_build_fams2_stream_programming_v2(const struct dc *dc,
struct dc_state *context,
struct dml2_context *dml_ctx)
diff --git a/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/dml21_wrapper.c b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/dml21_wrapper.c
index 798abb2b2e67..2623e917ec28 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/dml21_wrapper.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/dml21_wrapper.c
@@ -9,16 +9,21 @@
#include "dml21_utils.h"
#include "dml21_translation_helper.h"
#include "dml2_dc_resource_mgmt.h"
+#include "dc_fpu.h"
+
+#if !defined(DC_RUN_WITH_PREEMPTION_ENABLED)
+#define DC_RUN_WITH_PREEMPTION_ENABLED(code) code
+#endif // !DC_RUN_WITH_PREEMPTION_ENABLED
#define INVALID -1
static bool dml21_allocate_memory(struct dml2_context **dml_ctx)
{
- *dml_ctx = vzalloc(sizeof(struct dml2_context));
+ DC_RUN_WITH_PREEMPTION_ENABLED(*dml_ctx = vzalloc(sizeof(struct dml2_context)));
if (!(*dml_ctx))
return false;
- (*dml_ctx)->v21.dml_init.dml2_instance = vzalloc(sizeof(struct dml2_instance));
+ DC_RUN_WITH_PREEMPTION_ENABLED((*dml_ctx)->v21.dml_init.dml2_instance = vzalloc(sizeof(struct dml2_instance)));
if (!((*dml_ctx)->v21.dml_init.dml2_instance))
return false;
@@ -28,7 +33,7 @@ static bool dml21_allocate_memory(struct dml2_context **dml_ctx)
(*dml_ctx)->v21.mode_support.display_config = &(*dml_ctx)->v21.display_config;
(*dml_ctx)->v21.mode_programming.display_config = (*dml_ctx)->v21.mode_support.display_config;
- (*dml_ctx)->v21.mode_programming.programming = vzalloc(sizeof(struct dml2_display_cfg_programming));
+ DC_RUN_WITH_PREEMPTION_ENABLED((*dml_ctx)->v21.mode_programming.programming = vzalloc(sizeof(struct dml2_display_cfg_programming)));
if (!((*dml_ctx)->v21.mode_programming.programming))
return false;
@@ -70,8 +75,9 @@ static void dml21_init(const struct dc *in_dc, struct dml2_context *dml_ctx, con
bool dml21_create(const struct dc *in_dc, struct dml2_context **dml_ctx, const struct dml2_configuration_options *config)
{
/* Allocate memory for initializing DML21 instance */
- if (!dml21_allocate_memory(dml_ctx))
+ if (!dml21_allocate_memory(dml_ctx)) {
return false;
+ }
dml21_init(in_dc, *dml_ctx, config);
@@ -215,6 +221,7 @@ static bool dml21_mode_check_and_programming(const struct dc *in_dc, struct dc_s
return true;
if (context->stream_count == 0) {
+ dml21_init_min_clocks_for_dc_state(dml_ctx, context);
dml21_build_fams2_programming(in_dc, context, dml_ctx);
return true;
}
diff --git a/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/inc/bounding_boxes/dcn42_soc_bb.h b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/inc/bounding_boxes/dcn42_soc_bb.h
new file mode 100644
index 000000000000..c75778ea7a2c
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/inc/bounding_boxes/dcn42_soc_bb.h
@@ -0,0 +1,263 @@
+/* SPDX-License-Identifier: MIT */
+//
+// Copyright 2024 Advanced Micro Devices, Inc.
+
+#ifndef __DML_DML_DCN42_SOC_BB__
+#define __DML_DML_DCN42_SOC_BB__
+
+#include "dml_top_soc_parameter_types.h"
+
+static const struct dml2_soc_qos_parameters dml_dcn42_variant_a_soc_qos_params = {
+ .derate_table = {
+ .system_active_urgent = {
+ .dram_derate_percent_pixel = 65,
+ .dram_derate_percent_vm = 30,
+ .dram_derate_percent_pixel_and_vm = 60,
+ .fclk_derate_percent = 80,
+ .dcfclk_derate_percent = 80,
+ },
+ .system_active_average = {
+ .dram_derate_percent_pixel = 30,
+ .dram_derate_percent_vm = 30,
+ .dram_derate_percent_pixel_and_vm = 30,
+ .fclk_derate_percent = 60,
+ .dcfclk_derate_percent = 60,
+ },
+ .dcn_mall_prefetch_urgent = {
+ .dram_derate_percent_pixel = 65,
+ .dram_derate_percent_vm = 30,
+ .dram_derate_percent_pixel_and_vm = 60,
+ .fclk_derate_percent = 80,
+ .dcfclk_derate_percent = 80,
+ },
+ .dcn_mall_prefetch_average = {
+ .dram_derate_percent_pixel = 30,
+ .dram_derate_percent_vm = 30,
+ .dram_derate_percent_pixel_and_vm = 30,
+ .fclk_derate_percent = 60,
+ .dcfclk_derate_percent = 60,
+ },
+ .system_idle_average = {
+ .dram_derate_percent_pixel = 30,
+ .dram_derate_percent_vm = 30,
+ .dram_derate_percent_pixel_and_vm = 30,
+ .fclk_derate_percent = 60,
+ .dcfclk_derate_percent = 60,
+ },
+ },
+ .writeback = {
+ .base_latency_us = 12,
+ .scaling_factor_us = 0,
+ .scaling_factor_mhz = 0,
+ },
+ .qos_params = {
+ .dcn32x = {
+ .loaded_round_trip_latency_fclk_cycles = 106,
+ .urgent_latency_us = {
+ .base_latency_us = 4,
+ .base_latency_pixel_vm_us = 4,
+ .base_latency_vm_us = 4,
+ .scaling_factor_fclk_us = 0,
+ .scaling_factor_mhz = 0,
+ },
+ .urgent_out_of_order_return_per_channel_pixel_and_vm_bytes = 4096,
+ .urgent_out_of_order_return_per_channel_pixel_only_bytes = 4096,
+ .urgent_out_of_order_return_per_channel_vm_only_bytes = 4096,
+ },
+ },
+ .qos_type = dml2_qos_param_type_dcn3,
+};
+
+static const struct dml2_soc_bb dml2_socbb_dcn42 = {
+ .clk_table = {
+ .wck_ratio = {
+ .clk_values_khz = {2},
+ },
+ .uclk = {
+ .clk_values_khz = {400000},
+ .num_clk_values = 1,
+ },
+ .fclk = {
+ .clk_values_khz = {400000},
+ .num_clk_values = 1,
+ },
+ .dcfclk = {
+ .clk_values_khz = {200000},
+ .num_clk_values = 1,
+ },
+ .dispclk = {
+ .clk_values_khz = {1500000},
+ .num_clk_values = 1,
+ },
+ .dppclk = {
+ .clk_values_khz = {1500000},
+ .num_clk_values = 1,
+ },
+ .dtbclk = {
+ .clk_values_khz = {600000},
+ .num_clk_values = 1,
+ },
+ .phyclk = {
+ .clk_values_khz = {810000},
+ .num_clk_values = 1,
+ },
+ .socclk = {
+ .clk_values_khz = {600000},
+ .num_clk_values = 1,
+ },
+ .dscclk = {
+ .clk_values_khz = {500000},
+ .num_clk_values = 1,
+ },
+ .phyclk_d18 = {
+ .clk_values_khz = {667000},
+ .num_clk_values = 1,
+ },
+ .phyclk_d32 = {
+ .clk_values_khz = {625000},
+ .num_clk_values = 1,
+ },
+ .dram_config = {
+ .channel_width_bytes = 4,
+ .channel_count = 4,
+ .alt_clock_bw_conversion = true,
+ },
+ },
+
+ .qos_parameters = {
+ .derate_table = {
+ .system_active_urgent = {
+ .dram_derate_percent_pixel = 65,
+ .dram_derate_percent_vm = 30,
+ .dram_derate_percent_pixel_and_vm = 60,
+ .fclk_derate_percent = 80,
+ .dcfclk_derate_percent = 80,
+ },
+ .system_active_average = {
+ .dram_derate_percent_pixel = 30,
+ .dram_derate_percent_vm = 30,
+ .dram_derate_percent_pixel_and_vm = 30,
+ .fclk_derate_percent = 60,
+ .dcfclk_derate_percent = 60,
+ },
+ .dcn_mall_prefetch_urgent = {
+ .dram_derate_percent_pixel = 65,
+ .dram_derate_percent_vm = 30,
+ .dram_derate_percent_pixel_and_vm = 60,
+ .fclk_derate_percent = 80,
+ .dcfclk_derate_percent = 80,
+ },
+ .dcn_mall_prefetch_average = {
+ .dram_derate_percent_pixel = 30,
+ .dram_derate_percent_vm = 30,
+ .dram_derate_percent_pixel_and_vm = 30,
+ .fclk_derate_percent = 60,
+ .dcfclk_derate_percent = 60,
+ },
+ .system_idle_average = {
+ .dram_derate_percent_pixel = 30,
+ .dram_derate_percent_vm = 30,
+ .dram_derate_percent_pixel_and_vm = 30,
+ .fclk_derate_percent = 60,
+ .dcfclk_derate_percent = 60,
+ },
+ },
+ .writeback = {
+ .base_latency_us = 12,
+ .scaling_factor_us = 0,
+ .scaling_factor_mhz = 0,
+ },
+ .qos_params = {
+ .dcn32x = {
+ .loaded_round_trip_latency_fclk_cycles = 106,
+ .urgent_latency_us = {
+ .base_latency_us = 4,
+ .base_latency_pixel_vm_us = 4,
+ .base_latency_vm_us = 4,
+ .scaling_factor_fclk_us = 0,
+ .scaling_factor_mhz = 0,
+ },
+ .urgent_out_of_order_return_per_channel_pixel_and_vm_bytes = 4096,
+ .urgent_out_of_order_return_per_channel_pixel_only_bytes = 4096,
+ .urgent_out_of_order_return_per_channel_vm_only_bytes = 4096,
+ },
+ },
+ .qos_type = dml2_qos_param_type_dcn3,
+ },
+
+ .power_management_parameters = {
+ .dram_clk_change_blackout_us = 29,
+ .fclk_change_blackout_us = 0,
+ .g7_ppt_blackout_us = 0,
+ .stutter_enter_plus_exit_latency_us = 11,
+ .stutter_exit_latency_us = 9,
+ .z8_stutter_enter_plus_exit_latency_us = 300,
+ .z8_stutter_exit_latency_us = 200,
+ },
+
+ .vmin_limit = {
+ .dispclk_khz = 632 * 1000,
+ },
+
+ .dprefclk_mhz = 600,
+ .xtalclk_mhz = 24,
+ .pcie_refclk_mhz = 100,
+ .dchub_refclk_mhz = 50,
+ .mall_allocated_for_dcn_mbytes = 64,
+ .max_outstanding_reqs = 256,
+ .fabric_datapath_to_dcn_data_return_bytes = 32,
+ .return_bus_width_bytes = 64,
+ .hostvm_min_page_size_kbytes = 4,
+ .gpuvm_min_page_size_kbytes = 256,
+ .gpuvm_max_page_table_levels = 1,
+ .hostvm_max_non_cached_page_table_levels = 2,
+ .phy_downspread_percent = 0.38,
+ .dcn_downspread_percent = 0.38,
+ .dispclk_dppclk_vco_speed_mhz = 3000,
+ .do_urgent_latency_adjustment = 0,
+ .mem_word_bytes = 32,
+ .num_dcc_mcaches = 8,
+ .mcache_size_bytes = 2048,
+ .mcache_line_size_bytes = 32,
+ .max_fclk_for_uclk_dpm_khz = 2200 * 1000,
+};
+
+static const struct dml2_ip_capabilities dml2_dcn42_max_ip_caps = {
+ .pipe_count = 4,
+ .otg_count = 4,
+ .num_dsc = 4,
+ .max_num_dp2p0_streams = 4,
+ .max_num_hdmi_frl_outputs = 1,
+ .max_num_dp2p0_outputs = 4,
+ .rob_buffer_size_kbytes = 64,
+ .config_return_buffer_size_in_kbytes = 1792,
+ .config_return_buffer_segment_size_in_kbytes = 64,
+ .meta_fifo_size_in_kentries = 32,
+ .compressed_buffer_segment_size_in_kbytes = 64,
+ .cursor_buffer_size = 24,
+ .max_flip_time_us = 110,
+ .max_flip_time_lines = 50,
+ .hostvm_mode = 0,
+ .subvp_drr_scheduling_margin_us = 100,
+ .subvp_prefetch_end_to_mall_start_us = 15,
+ .subvp_fw_processing_delay = 15,
+ .max_vactive_det_fill_delay_us = 400,
+
+ .fams2 = {
+ .max_allow_delay_us = 100 * 1000,
+ .scheduling_delay_us = 550,
+ .vertical_interrupt_ack_delay_us = 40,
+ .allow_programming_delay_us = 18,
+ .min_allow_width_us = 20,
+ .subvp_df_throttle_delay_us = 100,
+ .subvp_programming_delay_us = 200,
+ .subvp_prefetch_to_mall_delay_us = 18,
+ .drr_programming_delay_us = 35,
+
+ .lock_timeout_us = 5000,
+ .recovery_timeout_us = 5000,
+ .flip_programming_delay_us = 300,
+ },
+};
+
+#endif
diff --git a/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/inc/dml_top_display_cfg_types.h b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/inc/dml_top_display_cfg_types.h
index b44762e21550..4e9abe1a568d 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/inc/dml_top_display_cfg_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/inc/dml_top_display_cfg_types.h
@@ -27,6 +27,19 @@ enum dml2_swizzle_mode {
dml2_gfx11_sw_256kb_d_x,
dml2_gfx11_sw_256kb_r_x,
+ dml2_sw_linear_256b, // GFX10 SW_LINEAR only accepts 256 byte aligned pitch
+ dml2_gfx10_sw_64kb_r_x,
+ dml2_gfx102_sw_64kb_s,
+ dml2_gfx102_sw_64kb_s_t,
+ dml2_gfx102_sw_64kb_s_x,
+ dml2_gfx102_sw_64kb_r_x,
+
+ dml2_linear_64elements, // GFX7 LINEAR_ALIGNED accepts pitch alignment of the maximum of 64 elements or 256 bytes
+ dml2_gfx7_1d_thin,
+ dml2_gfx7_2d_thin_gen_zero,
+ dml2_gfx7_2d_thin_gen_one,
+ dml2_gfx7_2d_thin_arlene,
+ dml2_gfx7_2d_thin_anubis
};
enum dml2_source_format_class {
diff --git a/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/inc/dml_top_types.h b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/inc/dml_top_types.h
index 943fd3f040c3..98b26116cdc1 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/inc/dml_top_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/inc/dml_top_types.h
@@ -19,6 +19,8 @@ enum dml2_project_id {
dml2_project_dcn4x_stage1,
dml2_project_dcn4x_stage2,
dml2_project_dcn4x_stage2_auto_drr_svp,
+ dml2_project_dcn40,
+ dml2_project_dcn42,
};
enum dml2_pstate_change_support {
@@ -79,6 +81,7 @@ struct dml2_options {
struct dml2_pmo_options pmo_options;
};
+
struct dml2_initialize_instance_in_out {
struct dml2_instance *dml2_instance;
struct dml2_options options;
diff --git a/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_core/dml2_core_dcn4.c b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_core/dml2_core_dcn4.c
index eba948e187c1..608b4a305c65 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_core/dml2_core_dcn4.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_core/dml2_core_dcn4.c
@@ -78,6 +78,86 @@ struct dml2_core_ip_params core_dcn4_ip_caps_base = {
.subvp_swath_height_margin_lines = 16,
};
+struct dml2_core_ip_params core_dcn42_ip_caps_base = {
+ .vblank_nom_default_us = 668,
+ .remote_iommu_outstanding_translations = 256,
+ .rob_buffer_size_kbytes = 64,
+ .config_return_buffer_size_in_kbytes = 1792,
+ .config_return_buffer_segment_size_in_kbytes = 64,
+ .compressed_buffer_segment_size_in_kbytes = 64,
+ .dpte_buffer_size_in_pte_reqs_luma = 68,
+ .dpte_buffer_size_in_pte_reqs_chroma = 36,
+ .pixel_chunk_size_kbytes = 8,
+ .alpha_pixel_chunk_size_kbytes = 4,
+ .min_pixel_chunk_size_bytes = 1024,
+ .writeback_chunk_size_kbytes = 8,
+ .line_buffer_size_bits = 1171920,
+ .max_line_buffer_lines = 32,
+ .writeback_interface_buffer_size_kbytes = 90,
+
+ //Number of pipes after DCN Pipe harvesting
+ .max_num_dpp = 4,
+ .max_num_otg = 4,
+ .max_num_opp = 4,
+ .max_num_wb = 1,
+ .max_dchub_pscl_bw_pix_per_clk = 4,
+ .max_pscl_lb_bw_pix_per_clk = 2,
+ .max_lb_vscl_bw_pix_per_clk = 4,
+ .max_vscl_hscl_bw_pix_per_clk = 4,
+ .max_hscl_ratio = 6,
+ .max_vscl_ratio = 6,
+ .max_hscl_taps = 8,
+ .max_vscl_taps = 8,
+ .dispclk_ramp_margin_percent = 1,
+ .dppclk_delay_subtotal = 47,
+ .dppclk_delay_scl = 50,
+ .dppclk_delay_scl_lb_only = 16,
+ .dppclk_delay_cnvc_formatter = 28,
+ .dppclk_delay_cnvc_cursor = 6,
+ .cursor_buffer_size = 42,
+ .cursor_chunk_size = 2,
+ .dispclk_delay_subtotal = 125,
+ .max_inter_dcn_tile_repeaters = 8,
+ .writeback_max_hscl_ratio = 1,
+ .writeback_max_vscl_ratio = 1,
+ .writeback_min_hscl_ratio = 1,
+ .writeback_min_vscl_ratio = 1,
+ .writeback_max_hscl_taps = 1,
+ .writeback_max_vscl_taps = 1,
+ .writeback_line_buffer_buffer_size = 0,
+ .num_dsc = 4,
+ .maximum_dsc_bits_per_component = 12,
+ .maximum_pixels_per_line_per_dsc_unit = 5760,
+ .dsc422_native_support = true,
+ .dcc_supported = true,
+ .ptoi_supported = false,
+
+ .cursor_64bpp_support = true,
+ .dynamic_metadata_vm_enabled = false,
+
+ .max_num_hdmi_frl_outputs = 0,
+ .max_num_dp2p0_outputs = 2,
+ .max_num_dp2p0_streams = 4,
+ .imall_supported = 1,
+ .max_flip_time_us = 110,
+ .max_flip_time_lines = 50,
+ .words_per_channel = 16,
+
+ .subvp_fw_processing_delay_us = 15,
+ .subvp_pstate_allow_width_us = 20,
+ .subvp_swath_height_margin_lines = 16,
+
+ .dcn_mrq_present = 1,
+ .zero_size_buffer_entries = 512,
+ .compbuf_reserved_space_zs = 64,
+ .dcc_meta_buffer_size_bytes = 6272,
+ .meta_chunk_size_kbytes = 2,
+ .min_meta_chunk_size_bytes = 256,
+
+ .dchub_arb_to_ret_delay = 102,
+ .hostvm_mode = 1,
+};
+
static void patch_ip_caps_with_explicit_ip_params(struct dml2_ip_capabilities *ip_caps, const struct dml2_core_ip_params *ip_params)
{
ip_caps->pipe_count = ip_params->max_num_dpp;
@@ -153,6 +233,37 @@ bool core_dcn4_initialize(struct dml2_core_initialize_in_out *in_out)
return true;
}
+bool core_dcn42_initialize(struct dml2_core_initialize_in_out *in_out)
+{
+	struct dml2_core_instance *core = in_out->instance;
+
+	if (!in_out->minimum_clock_table)
+		return false;
+	else
+		core->minimum_clock_table = in_out->minimum_clock_table;
+
+	if (in_out->explicit_ip_bb && in_out->explicit_ip_bb_size > 0) {
+		memcpy(&core->clean_me_up.mode_lib.ip, in_out->explicit_ip_bb, in_out->explicit_ip_bb_size);
+
+		// FIXME_STAGE2:
+		// DV still uses stage1 ip_param_st for each variant, need to patch the ip_caps with ip_param info
+		// Should move DV to use ip_caps but need move more overrides to ip_caps
+		patch_ip_caps_with_explicit_ip_params(in_out->ip_caps, in_out->explicit_ip_bb);
+		core->clean_me_up.mode_lib.ip.subvp_pstate_allow_width_us = core_dcn4_ip_caps_base.subvp_pstate_allow_width_us;
+		core->clean_me_up.mode_lib.ip.subvp_fw_processing_delay_us = core_dcn4_ip_caps_base.subvp_fw_processing_delay_us;
+		core->clean_me_up.mode_lib.ip.subvp_swath_height_margin_lines = core_dcn4_ip_caps_base.subvp_swath_height_margin_lines;
+	} else {
+		memcpy(&core->clean_me_up.mode_lib.ip, &core_dcn42_ip_caps_base, sizeof(struct dml2_core_ip_params));
+		patch_ip_params_with_ip_caps(&core->clean_me_up.mode_lib.ip, in_out->ip_caps);
+		core->clean_me_up.mode_lib.ip.imall_supported = false;
+	}
+
+	memcpy(&core->clean_me_up.mode_lib.soc, in_out->soc_bb, sizeof(struct dml2_soc_bb));
+	memcpy(&core->clean_me_up.mode_lib.ip_caps, in_out->ip_caps, sizeof(struct dml2_ip_capabilities));
+
+	return true;
+}
+
static void create_phantom_stream_from_main_stream(struct dml2_stream_parameters *phantom, const struct dml2_stream_parameters *main,
const struct dml2_implicit_svp_meta *meta)
{
diff --git a/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_core/dml2_core_dcn4.h b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_core/dml2_core_dcn4.h
index a68bb001a346..5c26d819a673 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_core/dml2_core_dcn4.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_core/dml2_core_dcn4.h
@@ -5,6 +5,7 @@
#ifndef __DML2_CORE_DCN4_H__
#define __DML2_CORE_DCN4_H__
bool core_dcn4_initialize(struct dml2_core_initialize_in_out *in_out);
+bool core_dcn42_initialize(struct dml2_core_initialize_in_out *in_out);
bool core_dcn4_mode_support(struct dml2_core_mode_support_in_out *in_out);
bool core_dcn4_mode_programming(struct dml2_core_mode_programming_in_out *in_out);
bool core_dcn4_populate_informative(struct dml2_core_populate_informative_in_out *in_out);
diff --git a/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_core/dml2_core_factory.c b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_core/dml2_core_factory.c
index cc4f0663c6d6..6cad99c21139 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_core/dml2_core_factory.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_core/dml2_core_factory.c
@@ -21,6 +21,7 @@ bool dml2_core_create(enum dml2_project_id project_id, struct dml2_core_instance
case dml2_project_dcn4x_stage1:
result = false;
break;
+ case dml2_project_dcn40:
case dml2_project_dcn4x_stage2:
case dml2_project_dcn4x_stage2_auto_drr_svp:
out->initialize = &core_dcn4_initialize;
@@ -30,6 +31,14 @@ bool dml2_core_create(enum dml2_project_id project_id, struct dml2_core_instance
out->calculate_mcache_allocation = &core_dcn4_calculate_mcache_allocation;
result = true;
break;
+ case dml2_project_dcn42:
+ out->initialize = &core_dcn42_initialize;
+ out->mode_support = &core_dcn4_mode_support;
+ out->mode_programming = &core_dcn4_mode_programming;
+ out->populate_informative = &core_dcn4_populate_informative;
+ out->calculate_mcache_allocation = &core_dcn4_calculate_mcache_allocation;
+ result = true;
+ break;
case dml2_project_invalid:
default:
break;
diff --git a/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_core/dml2_core_utils.c b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_core/dml2_core_utils.c
index b57d0f6ea6a1..6930ba7ce5b7 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_core/dml2_core_utils.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_core/dml2_core_utils.c
@@ -428,6 +428,9 @@ bool dml2_core_utils_is_phantom_pipe(const struct dml2_plane_parameters *plane_c
unsigned int dml2_core_utils_get_tile_block_size_bytes(enum dml2_swizzle_mode sw_mode, unsigned int byte_per_pixel)
{
+ if (dml2_core_utils_get_gfx_version(sw_mode) == 10 || dml2_core_utils_get_gfx_version(sw_mode) == 7) {
+ return dml2_core_utils_get_tile_block_size_bytes_backcompat(sw_mode, byte_per_pixel);
+ }
if (sw_mode == dml2_sw_linear)
return 256;
@@ -459,14 +462,56 @@ unsigned int dml2_core_utils_get_tile_block_size_bytes(enum dml2_swizzle_mode sw
};
}
+unsigned int dml2_core_utils_get_tile_block_size_bytes_backcompat(enum dml2_swizzle_mode sw_mode, unsigned int byte_per_pixel)
+{
+ if (sw_mode == dml2_sw_linear_256b)
+ return 256;
+ else if (sw_mode == dml2_gfx10_sw_64kb_r_x)
+ return 65536;
+ else if (sw_mode == dml2_gfx102_sw_64kb_s)
+ return 65536;
+ else if (sw_mode == dml2_gfx102_sw_64kb_s_t)
+ return 65536;
+ else if (sw_mode == dml2_gfx102_sw_64kb_s_x)
+ return 65536;
+ else if (sw_mode == dml2_gfx102_sw_64kb_r_x)
+ return 65536;
+ else if (sw_mode == dml2_linear_64elements)
+ return 256;
+ else if (sw_mode == dml2_gfx7_1d_thin)
+ return 256;
+ else if (sw_mode == dml2_gfx7_2d_thin_gen_zero)
+ return (128 * 64 * byte_per_pixel);
+ else if (sw_mode == dml2_gfx7_2d_thin_gen_one)
+ return (128 * 128 * byte_per_pixel);
+ else if (sw_mode == dml2_gfx7_2d_thin_arlene)
+ return (64 * 32 * byte_per_pixel);
+ else if (sw_mode == dml2_gfx7_2d_thin_anubis)
+ return (128 * 128 * byte_per_pixel);
+ else {
+ DML_ASSERT(0);
+ return 256;
+ };
+}
+
bool dml2_core_utils_get_segment_horizontal_contiguous(enum dml2_swizzle_mode sw_mode, unsigned int byte_per_pixel)
{
- return (byte_per_pixel != 2);
+ if (dml2_core_utils_get_gfx_version(sw_mode) == 10 || dml2_core_utils_get_gfx_version(sw_mode) == 7) {
+ return dml2_core_utils_get_segment_horizontal_contiguous_backcompat(sw_mode, byte_per_pixel);
+ } else {
+ return (byte_per_pixel != 2);
+ }
+}
+
+bool dml2_core_utils_get_segment_horizontal_contiguous_backcompat(enum dml2_swizzle_mode sw_mode, unsigned int byte_per_pixel)
+{
+ return !((byte_per_pixel == 4) &&
+ ((sw_mode == dml2_gfx10_sw_64kb_r_x) || (sw_mode == dml2_gfx102_sw_64kb_s) || (sw_mode == dml2_gfx102_sw_64kb_s_t) || (sw_mode == dml2_gfx102_sw_64kb_s_x)));
}
bool dml2_core_utils_is_linear(enum dml2_swizzle_mode sw_mode)
{
- return sw_mode == dml2_sw_linear;
+ return (sw_mode == dml2_sw_linear || sw_mode == dml2_sw_linear_256b || sw_mode == dml2_linear_64elements);
};
@@ -499,6 +544,20 @@ int unsigned dml2_core_utils_get_gfx_version(enum dml2_swizzle_mode sw_mode)
sw_mode == dml2_gfx11_sw_256kb_d_x ||
sw_mode == dml2_gfx11_sw_256kb_r_x)
version = 11;
+ else if (sw_mode == dml2_sw_linear_256b ||
+ sw_mode == dml2_gfx10_sw_64kb_r_x ||
+ sw_mode == dml2_gfx102_sw_64kb_s ||
+ sw_mode == dml2_gfx102_sw_64kb_s_t ||
+ sw_mode == dml2_gfx102_sw_64kb_s_x ||
+ sw_mode == dml2_gfx102_sw_64kb_r_x)
+ version = 10;
+ else if (sw_mode == dml2_linear_64elements ||
+ sw_mode == dml2_gfx7_1d_thin ||
+ sw_mode == dml2_gfx7_2d_thin_gen_zero ||
+ sw_mode == dml2_gfx7_2d_thin_gen_one ||
+ sw_mode == dml2_gfx7_2d_thin_arlene ||
+ sw_mode == dml2_gfx7_2d_thin_anubis)
+ version = 7;
else {
DML_LOG_VERBOSE("ERROR: Invalid sw_mode setting! val=%u\n", sw_mode);
DML_ASSERT(0);
diff --git a/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_core/dml2_core_utils.h b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_core/dml2_core_utils.h
index 95f0d017add4..471e73ed671c 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_core/dml2_core_utils.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_core/dml2_core_utils.h
@@ -22,6 +22,8 @@ void dml2_core_utils_pipe_plane_mapping(const struct core_display_cfg_support_in
bool dml2_core_utils_is_phantom_pipe(const struct dml2_plane_parameters *plane_cfg);
unsigned int dml2_core_utils_get_tile_block_size_bytes(enum dml2_swizzle_mode sw_mode, unsigned int byte_per_pixel);
bool dml2_core_utils_get_segment_horizontal_contiguous(enum dml2_swizzle_mode sw_mode, unsigned int byte_per_pixel);
+unsigned int dml2_core_utils_get_tile_block_size_bytes_backcompat(enum dml2_swizzle_mode sw_mode, unsigned int byte_per_pixel);
+bool dml2_core_utils_get_segment_horizontal_contiguous_backcompat(enum dml2_swizzle_mode sw_mode, unsigned int byte_per_pixel);
bool dml2_core_utils_is_vertical_rotation(enum dml2_rotation_angle Scan);
bool dml2_core_utils_is_linear(enum dml2_swizzle_mode sw_mode);
int unsigned dml2_core_utils_get_gfx_version(enum dml2_swizzle_mode sw_mode);
diff --git a/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_dpmm/dml2_dpmm_dcn4.c b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_dpmm/dml2_dpmm_dcn4.c
index 9d7741fd0adb..d17e59d684fd 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_dpmm/dml2_dpmm_dcn4.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_dpmm/dml2_dpmm_dcn4.c
@@ -802,3 +802,36 @@ bool dpmm_dcn4_map_watermarks(struct dml2_dpmm_map_watermarks_params_in_out *in_
return true;
}
+bool dpmm_dcn42_map_watermarks(struct dml2_dpmm_map_watermarks_params_in_out *in_out)
+{
+ const struct dml2_display_cfg *display_cfg = &in_out->display_cfg->display_config;
+ const struct dml2_core_internal_display_mode_lib *mode_lib = &in_out->core->clean_me_up.mode_lib;
+ struct dml2_dchub_global_register_set *dchubbub_regs = &in_out->programming->global_regs;
+
+ double refclk_freq_in_mhz = (display_cfg->overrides.hw.dlg_ref_clk_mhz > 0) ? (double)display_cfg->overrides.hw.dlg_ref_clk_mhz : mode_lib->soc.dchub_refclk_mhz;
+
+ /* set A */
+ dchubbub_regs->wm_regs[DML2_DCHUB_WATERMARK_SET_A].fclk_pstate = (int unsigned)(mode_lib->mp.Watermark.FCLKChangeWatermark * refclk_freq_in_mhz);
+ dchubbub_regs->wm_regs[DML2_DCHUB_WATERMARK_SET_A].sr_enter = (int unsigned)(mode_lib->mp.Watermark.StutterEnterPlusExitWatermark * refclk_freq_in_mhz);
+ dchubbub_regs->wm_regs[DML2_DCHUB_WATERMARK_SET_A].sr_exit = (int unsigned)(mode_lib->mp.Watermark.StutterExitWatermark * refclk_freq_in_mhz);
+ dchubbub_regs->wm_regs[DML2_DCHUB_WATERMARK_SET_A].sr_enter_z8 = (int unsigned)(mode_lib->mp.Watermark.Z8StutterEnterPlusExitWatermark * refclk_freq_in_mhz);
+ dchubbub_regs->wm_regs[DML2_DCHUB_WATERMARK_SET_A].sr_exit_z8 = (int unsigned)(mode_lib->mp.Watermark.Z8StutterExitWatermark * refclk_freq_in_mhz);
+ dchubbub_regs->wm_regs[DML2_DCHUB_WATERMARK_SET_A].temp_read_or_ppt = (int unsigned)(mode_lib->mp.Watermark.temp_read_or_ppt_watermark_us * refclk_freq_in_mhz);
+ dchubbub_regs->wm_regs[DML2_DCHUB_WATERMARK_SET_A].uclk_pstate = (int unsigned)(mode_lib->mp.Watermark.DRAMClockChangeWatermark * refclk_freq_in_mhz);
+ dchubbub_regs->wm_regs[DML2_DCHUB_WATERMARK_SET_A].urgent = (int unsigned)(mode_lib->mp.Watermark.UrgentWatermark * refclk_freq_in_mhz);
+ dchubbub_regs->wm_regs[DML2_DCHUB_WATERMARK_SET_A].usr = (int unsigned)(mode_lib->mp.Watermark.USRRetrainingWatermark * refclk_freq_in_mhz);
+ dchubbub_regs->wm_regs[DML2_DCHUB_WATERMARK_SET_A].refcyc_per_trip_to_mem = (unsigned int)(mode_lib->mp.Watermark.UrgentWatermark * refclk_freq_in_mhz);
+ dchubbub_regs->wm_regs[DML2_DCHUB_WATERMARK_SET_A].refcyc_per_meta_trip_to_mem = (unsigned int)(mode_lib->mp.Watermark.UrgentWatermark * refclk_freq_in_mhz);
+ dchubbub_regs->wm_regs[DML2_DCHUB_WATERMARK_SET_A].frac_urg_bw_flip = (unsigned int)(mode_lib->mp.FractionOfUrgentBandwidthImmediateFlip * 1000);
+ dchubbub_regs->wm_regs[DML2_DCHUB_WATERMARK_SET_A].frac_urg_bw_nom = (unsigned int)(mode_lib->mp.FractionOfUrgentBandwidth * 1000);
+ dchubbub_regs->wm_regs[DML2_DCHUB_WATERMARK_SET_A].frac_urg_bw_mall = (unsigned int)(mode_lib->mp.FractionOfUrgentBandwidthMALL * 1000);
+
+ /* set B */
+ dchubbub_regs->wm_regs[DML2_DCHUB_WATERMARK_SET_B] = dchubbub_regs->wm_regs[DML2_DCHUB_WATERMARK_SET_A];
+ dchubbub_regs->wm_regs[DML2_DCHUB_WATERMARK_SET_C] = dchubbub_regs->wm_regs[DML2_DCHUB_WATERMARK_SET_A];
+ dchubbub_regs->wm_regs[DML2_DCHUB_WATERMARK_SET_D] = dchubbub_regs->wm_regs[DML2_DCHUB_WATERMARK_SET_A];
+
+ dchubbub_regs->num_watermark_sets = 4;
+
+ return true;
+}
diff --git a/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_dpmm/dml2_dpmm_dcn4.h b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_dpmm/dml2_dpmm_dcn4.h
index e7b58f2efda4..5fbd07e238a8 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_dpmm/dml2_dpmm_dcn4.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_dpmm/dml2_dpmm_dcn4.h
@@ -10,5 +10,6 @@
bool dpmm_dcn3_map_mode_to_soc_dpm(struct dml2_dpmm_map_mode_to_soc_dpm_params_in_out *in_out);
bool dpmm_dcn4_map_mode_to_soc_dpm(struct dml2_dpmm_map_mode_to_soc_dpm_params_in_out *in_out);
bool dpmm_dcn4_map_watermarks(struct dml2_dpmm_map_watermarks_params_in_out *in_out);
+bool dpmm_dcn42_map_watermarks(struct dml2_dpmm_map_watermarks_params_in_out *in_out);
#endif
diff --git a/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_dpmm/dml2_dpmm_factory.c b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_dpmm/dml2_dpmm_factory.c
index dfd01440737d..1f2d9e97f5fd 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_dpmm/dml2_dpmm_factory.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_dpmm/dml2_dpmm_factory.c
@@ -31,6 +31,7 @@ bool dml2_dpmm_create(enum dml2_project_id project_id, struct dml2_dpmm_instance
out->map_watermarks = &dummy_map_watermarks;
result = true;
break;
+ case dml2_project_dcn40:
case dml2_project_dcn4x_stage2:
out->map_mode_to_soc_dpm = &dpmm_dcn3_map_mode_to_soc_dpm;
out->map_watermarks = &dummy_map_watermarks;
@@ -41,6 +42,11 @@ bool dml2_dpmm_create(enum dml2_project_id project_id, struct dml2_dpmm_instance
out->map_watermarks = &dpmm_dcn4_map_watermarks;
result = true;
break;
+ case dml2_project_dcn42:
+ out->map_mode_to_soc_dpm = &dpmm_dcn4_map_mode_to_soc_dpm;
+ out->map_watermarks = &dpmm_dcn42_map_watermarks;
+ result = true;
+ break;
case dml2_project_invalid:
default:
break;
diff --git a/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_mcg/dml2_mcg_dcn4.h b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_mcg/dml2_mcg_dcn4.h
index f54fde8fba90..02da6f45cbf7 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_mcg/dml2_mcg_dcn4.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_mcg/dml2_mcg_dcn4.h
@@ -10,4 +10,4 @@
bool mcg_dcn4_build_min_clock_table(struct dml2_mcg_build_min_clock_table_params_in_out *in_out);
bool mcg_dcn4_unit_test(void);
-#endif \ No newline at end of file
+#endif
diff --git a/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_mcg/dml2_mcg_dcn42.c b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_mcg/dml2_mcg_dcn42.c
new file mode 100644
index 000000000000..1f67cbc2c236
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_mcg/dml2_mcg_dcn42.c
@@ -0,0 +1,76 @@
+// SPDX-License-Identifier: MIT
+//
+// Copyright 2026 Advanced Micro Devices, Inc.
+
+#include "dml2_mcg_dcn42.h"
+#include "dml_top_soc_parameter_types.h"
+
+static unsigned long long uclk_to_dram_bw_kbps(unsigned long uclk_khz, const struct dml2_dram_params *dram_config, unsigned long wck_ratio)
+{
+ unsigned long long bw_kbps = 0;
+
+ bw_kbps = (unsigned long long) uclk_khz * dram_config->channel_count * dram_config->channel_width_bytes * wck_ratio * 2;
+ return bw_kbps;
+}
+
+static bool build_min_clk_table_coarse_grained(const struct dml2_soc_bb *soc_bb, struct dml2_mcg_min_clock_table *min_table)
+{
+ int i;
+
+ for (i = 0; i < soc_bb->clk_table.fclk.num_clk_values; i++) {
+ if (i < soc_bb->clk_table.uclk.num_clk_values) {
+ min_table->dram_bw_table.entries[i].pre_derate_dram_bw_kbps =
+ uclk_to_dram_bw_kbps(soc_bb->clk_table.uclk.clk_values_khz[i], &soc_bb->clk_table.dram_config, soc_bb->clk_table.wck_ratio.clk_values_khz[i]);
+ min_table->dram_bw_table.entries[i].min_uclk_khz = soc_bb->clk_table.uclk.clk_values_khz[i];
+ } else {
+ min_table->dram_bw_table.entries[i].pre_derate_dram_bw_kbps = min_table->dram_bw_table.entries[soc_bb->clk_table.uclk.num_clk_values - 1].pre_derate_dram_bw_kbps;
+ min_table->dram_bw_table.entries[i].min_uclk_khz = soc_bb->clk_table.uclk.clk_values_khz[soc_bb->clk_table.uclk.num_clk_values - 1];
+ }
+
+ min_table->dram_bw_table.entries[i].min_dcfclk_khz = soc_bb->clk_table.dcfclk.clk_values_khz[i];
+ min_table->dram_bw_table.entries[i].min_fclk_khz = soc_bb->clk_table.fclk.clk_values_khz[i];
+ }
+ min_table->dram_bw_table.num_entries = soc_bb->clk_table.fclk.num_clk_values;
+
+ return true;
+}
+
+static bool build_min_clock_table(const struct dml2_soc_bb *soc_bb, struct dml2_mcg_min_clock_table *min_table)
+{
+	bool result;
+
+	if (!soc_bb || !min_table)
+		return false;
+
+
+	if (soc_bb->clk_table.uclk.num_clk_values > DML_MCG_MAX_CLK_TABLE_SIZE || soc_bb->clk_table.fclk.num_clk_values > DML_MCG_MAX_CLK_TABLE_SIZE)
+		return false;
+
+	min_table->fixed_clocks_khz.amclk = 0;
+	min_table->fixed_clocks_khz.dprefclk = soc_bb->dprefclk_mhz * 1000;
+	min_table->fixed_clocks_khz.pcierefclk = soc_bb->pcie_refclk_mhz * 1000;
+	min_table->fixed_clocks_khz.dchubrefclk = soc_bb->dchub_refclk_mhz * 1000;
+	min_table->fixed_clocks_khz.xtalclk = soc_bb->xtalclk_mhz * 1000;
+
+	min_table->max_clocks_khz.dispclk = soc_bb->clk_table.dispclk.clk_values_khz[soc_bb->clk_table.dispclk.num_clk_values - 1];
+	min_table->max_clocks_khz.dppclk = soc_bb->clk_table.dppclk.clk_values_khz[soc_bb->clk_table.dppclk.num_clk_values - 1];
+	min_table->max_clocks_khz.dscclk = soc_bb->clk_table.dscclk.clk_values_khz[soc_bb->clk_table.dscclk.num_clk_values - 1];
+	min_table->max_clocks_khz.dtbclk = soc_bb->clk_table.dtbclk.clk_values_khz[soc_bb->clk_table.dtbclk.num_clk_values - 1];
+	min_table->max_clocks_khz.phyclk = soc_bb->clk_table.phyclk.clk_values_khz[soc_bb->clk_table.phyclk.num_clk_values - 1];
+
+	min_table->max_ss_clocks_khz.dispclk = (unsigned int)((double)min_table->max_clocks_khz.dispclk / (1.0 + soc_bb->dcn_downspread_percent / 100.0));
+	min_table->max_ss_clocks_khz.dppclk = (unsigned int)((double)min_table->max_clocks_khz.dppclk / (1.0 + soc_bb->dcn_downspread_percent / 100.0));
+	min_table->max_ss_clocks_khz.dtbclk = (unsigned int)((double)min_table->max_clocks_khz.dtbclk / (1.0 + soc_bb->dcn_downspread_percent / 100.0));
+
+	min_table->max_clocks_khz.dcfclk = soc_bb->clk_table.dcfclk.clk_values_khz[soc_bb->clk_table.dcfclk.num_clk_values - 1];
+	min_table->max_clocks_khz.fclk = soc_bb->clk_table.fclk.clk_values_khz[soc_bb->clk_table.fclk.num_clk_values - 1];
+
+	result = build_min_clk_table_coarse_grained(soc_bb, min_table);
+
+	return result;
+}
+
+bool mcg_dcn42_build_min_clock_table(struct dml2_mcg_build_min_clock_table_params_in_out *in_out)
+{
+ return build_min_clock_table(in_out->soc_bb, in_out->min_clk_table);
+}
diff --git a/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_mcg/dml2_mcg_dcn42.h b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_mcg/dml2_mcg_dcn42.h
new file mode 100644
index 000000000000..d4ea49e3e674
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_mcg/dml2_mcg_dcn42.h
@@ -0,0 +1,12 @@
+/* SPDX-License-Identifier: MIT */
+//
+// Copyright 2026 Advanced Micro Devices, Inc.
+
+#ifndef __DML2_MCG_DCN42_H__
+#define __DML2_MCG_DCN42_H__
+
+#include "dml2_internal_shared_types.h"
+
+bool mcg_dcn42_build_min_clock_table(struct dml2_mcg_build_min_clock_table_params_in_out *in_out);
+
+#endif
diff --git a/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_mcg/dml2_mcg_factory.c b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_mcg/dml2_mcg_factory.c
index c60b8fe90819..3dcd2c250633 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_mcg/dml2_mcg_factory.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_mcg/dml2_mcg_factory.c
@@ -4,6 +4,7 @@
#include "dml2_mcg_factory.h"
#include "dml2_mcg_dcn4.h"
+#include "dml2_mcg_dcn42.h"
#include "dml2_external_lib_deps.h"
static bool dummy_build_min_clock_table(struct dml2_mcg_build_min_clock_table_params_in_out *in_out)
@@ -25,11 +26,16 @@ bool dml2_mcg_create(enum dml2_project_id project_id, struct dml2_mcg_instance *
out->build_min_clock_table = &dummy_build_min_clock_table;
result = true;
break;
+ case dml2_project_dcn40:
case dml2_project_dcn4x_stage2:
case dml2_project_dcn4x_stage2_auto_drr_svp:
out->build_min_clock_table = &mcg_dcn4_build_min_clock_table;
result = true;
break;
+ case dml2_project_dcn42:
+ out->build_min_clock_table = &mcg_dcn42_build_min_clock_table;
+ result = true;
+ break;
case dml2_project_invalid:
default:
break;
diff --git a/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_pmo/dml2_pmo_dcn4_fams2.c b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_pmo/dml2_pmo_dcn4_fams2.c
index c26e100fcaf2..e8691983c0eb 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_pmo/dml2_pmo_dcn4_fams2.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_pmo/dml2_pmo_dcn4_fams2.c
@@ -23,6 +23,7 @@ static const struct dml2_pmo_pstate_strategy base_strategy_list_1_display[] = {
.allow_state_increase = true,
},
+
// Then VBlank
{
.per_stream_pstate_method = { dml2_pstate_method_vblank, dml2_pstate_method_na, dml2_pstate_method_na, dml2_pstate_method_na },
@@ -53,6 +54,7 @@ static const struct dml2_pmo_pstate_strategy base_strategy_list_2_display[] = {
.allow_state_increase = true,
},
+
// Then VActive + VBlank
{
.per_stream_pstate_method = { dml2_pstate_method_vactive, dml2_pstate_method_vblank, dml2_pstate_method_na, dml2_pstate_method_na },
@@ -113,6 +115,7 @@ static const struct dml2_pmo_pstate_strategy base_strategy_list_3_display[] = {
.allow_state_increase = true,
},
+
// VActive + 1 VBlank
{
.per_stream_pstate_method = { dml2_pstate_method_vactive, dml2_pstate_method_vactive, dml2_pstate_method_vblank, dml2_pstate_method_na },
@@ -149,6 +152,7 @@ static const struct dml2_pmo_pstate_strategy base_strategy_list_4_display[] = {
.allow_state_increase = true,
},
+
// VActive + 1 VBlank
{
.per_stream_pstate_method = { dml2_pstate_method_vactive, dml2_pstate_method_vactive, dml2_pstate_method_vactive, dml2_pstate_method_vblank },
@@ -1651,6 +1655,7 @@ static bool validate_pstate_support_strategy_cofunctionality(struct dml2_pmo_ins
if (svp_count > 0 && (pmo->options->disable_svp || !all_timings_support_svp(pmo, display_cfg, svp_stream_mask)))
return false;
+
return is_config_schedulable(pmo, display_cfg, pstate_strategy);
}
@@ -1980,6 +1985,7 @@ static void reset_display_configuration(struct display_configuation_with_meta *d
}
}
+
static void setup_planes_for_drr_by_mask(struct display_configuation_with_meta *display_config,
struct dml2_pmo_instance *pmo,
int plane_mask)
diff --git a/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_pmo/dml2_pmo_factory.c b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_pmo/dml2_pmo_factory.c
index 55d2464365d0..4d687fa86caa 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_pmo/dml2_pmo_factory.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_pmo/dml2_pmo_factory.c
@@ -3,8 +3,8 @@
// Copyright 2024 Advanced Micro Devices, Inc.
#include "dml2_pmo_factory.h"
-#include "dml2_pmo_dcn4_fams2.h"
#include "dml2_pmo_dcn3.h"
+#include "dml2_pmo_dcn4_fams2.h"
#include "dml2_external_lib_deps.h"
static bool dummy_init_for_stutter(struct dml2_pmo_init_for_stutter_in_out *in_out)
@@ -37,6 +37,7 @@ bool dml2_pmo_create(enum dml2_project_id project_id, struct dml2_pmo_instance *
out->optimize_dcc_mcache = pmo_dcn4_fams2_optimize_dcc_mcache;
result = true;
break;
+ case dml2_project_dcn40:
case dml2_project_dcn4x_stage2:
out->initialize = pmo_dcn3_initialize;
@@ -56,6 +57,7 @@ bool dml2_pmo_create(enum dml2_project_id project_id, struct dml2_pmo_instance *
result = true;
break;
+ case dml2_project_dcn42:
case dml2_project_dcn4x_stage2_auto_drr_svp:
out->initialize = pmo_dcn4_fams2_initialize;
diff --git a/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_pmo/dml2_pmo_factory.h b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_pmo/dml2_pmo_factory.h
index b90f6263cd85..7218de1824cc 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_pmo/dml2_pmo_factory.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_pmo/dml2_pmo_factory.h
@@ -10,4 +10,4 @@
bool dml2_pmo_create(enum dml2_project_id project_id, struct dml2_pmo_instance *out);
-#endif \ No newline at end of file
+#endif
diff --git a/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_top/dml2_top_interfaces.c b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_top/dml2_top_interfaces.c
index 5a33e2f357f4..a6c5031f69c1 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_top/dml2_top_interfaces.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_top/dml2_top_interfaces.c
@@ -17,6 +17,8 @@ bool dml2_initialize_instance(struct dml2_initialize_instance_in_out *in_out)
case dml2_project_dcn4x_stage1:
case dml2_project_dcn4x_stage2:
case dml2_project_dcn4x_stage2_auto_drr_svp:
+ case dml2_project_dcn40:
+ case dml2_project_dcn42:
return dml2_top_soc15_initialize_instance(in_out);
case dml2_project_invalid:
default:
diff --git a/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_top/dml2_top_legacy.c b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_top/dml2_top_legacy.c
index 5e14d85821e2..0e3177fe9d27 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_top/dml2_top_legacy.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/dml2_top/dml2_top_legacy.c
@@ -3,7 +3,6 @@
// Copyright 2024 Advanced Micro Devices, Inc.
#include "dml2_top_legacy.h"
-#include "dml2_top_soc15.h"
#include "dml2_core_factory.h"
#include "dml2_pmo_factory.h"
#include "display_mode_core_structs.h"
diff --git a/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/inc/dml2_internal_shared_types.h b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/inc/dml2_internal_shared_types.h
index a6bd75f30d20..d328d92240b4 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/inc/dml2_internal_shared_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml21/src/inc/dml2_internal_shared_types.h
@@ -410,6 +410,7 @@ struct dml2_core_mode_support_in_out {
} legacy;
};
+
struct dml2_core_mode_programming_in_out {
/*
* Inputs
@@ -501,6 +502,7 @@ struct dml2_core_instance {
bool (*populate_informative)(struct dml2_core_populate_informative_in_out *in_out);
bool (*calculate_mcache_allocation)(struct dml2_calculate_mcache_allocation_in_out *in_out);
+
struct {
struct dml2_core_internal_display_mode_lib mode_lib;
} clean_me_up;
@@ -753,6 +755,7 @@ struct dml2_pmo_instance {
bool (*test_for_stutter)(struct dml2_pmo_test_for_stutter_in_out *in_out);
bool (*optimize_for_stutter)(struct dml2_pmo_optimize_for_stutter_in_out *in_out);
+
struct dml2_pmo_init_data init_data;
struct dml2_pmo_scratch scratch;
};
diff --git a/drivers/gpu/drm/amd/display/dc/dml2_0/dml2_mall_phantom.c b/drivers/gpu/drm/amd/display/dc/dml2_0/dml2_mall_phantom.c
index 66040c877d68..d56e58ce26c7 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2_0/dml2_mall_phantom.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml2_mall_phantom.c
@@ -357,7 +357,7 @@ static bool enough_pipes_for_subvp(struct dml2_context *ctx, struct dc_state *st
*/
static bool subvp_subvp_schedulable(struct dml2_context *ctx, struct dc_state *context)
{
- struct pipe_ctx *subvp_pipes[2];
+ struct pipe_ctx *subvp_pipes[2] = { NULL, NULL };
struct dc_stream_state *phantom = NULL;
uint32_t microschedule_lines = 0;
uint32_t index = 0;
@@ -369,6 +369,9 @@ static bool subvp_subvp_schedulable(struct dml2_context *ctx, struct dc_state *c
struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
uint32_t time_us = 0;
+ if (pipe == NULL || pipe->stream == NULL)
+ continue;
+
/* Loop to calculate the maximum microschedule time between the two SubVP pipes,
* and also to store the two main SubVP pipe pointers in subvp_pipes[2].
*/
@@ -386,14 +389,19 @@ static bool subvp_subvp_schedulable(struct dml2_context *ctx, struct dc_state *c
if (time_us > max_microschedule_us)
max_microschedule_us = time_us;
- subvp_pipes[index] = pipe;
- index++;
+ if (index < 2)
+ subvp_pipes[index++] = pipe;
// Maximum 2 SubVP pipes
if (index == 2)
break;
}
}
+
+ /* Minimal guard to avoid C6001 before subvp_pipes[0]/[1] dereference */
+ if (index < 2 || !subvp_pipes[0] || !subvp_pipes[1])
+ return false;
+
vactive1_us = ((subvp_pipes[0]->stream->timing.v_addressable * subvp_pipes[0]->stream->timing.h_total) /
(double)(subvp_pipes[0]->stream->timing.pix_clk_100hz * 100)) * 1000000;
vactive2_us = ((subvp_pipes[1]->stream->timing.v_addressable * subvp_pipes[1]->stream->timing.h_total) /
@@ -459,6 +467,11 @@ bool dml2_svp_drr_schedulable(struct dml2_context *ctx, struct dc_state *context
break;
}
+ if (pipe == NULL || pipe->stream == NULL) {
+ // Defensive: should never happen, try to catch in debug
+ ASSERT(0);
+ return false;
+ }
phantom_stream = ctx->config.svp_pstate.callbacks.get_paired_subvp_stream(context, pipe->stream);
main_timing = &pipe->stream->timing;
phantom_timing = &phantom_stream->timing;
@@ -549,6 +562,13 @@ static bool subvp_vblank_schedulable(struct dml2_context *ctx, struct dc_state *
if (!subvp_pipe && pipe_mall_type == SUBVP_MAIN)
subvp_pipe = pipe;
}
+
+ if (subvp_pipe == NULL) {
+ // Defensive: should never happen, catch in debug
+ ASSERT(0);
+ return false;
+ }
+
// Use ignore_msa_timing_param flag to identify as DRR
if (found && context->res_ctx.pipe_ctx[vblank_index].stream->ignore_msa_timing_param) {
// SUBVP + DRR case
@@ -753,6 +773,12 @@ static void enable_phantom_plane(struct dml2_context *ctx,
return;
}
+ /* Minimal NULL guard for C6011 */
+ if (!phantom_plane) {
+ ASSERT(0);
+ continue;
+ }
+
memcpy(&phantom_plane->address, &curr_pipe->plane_state->address, sizeof(phantom_plane->address));
memcpy(&phantom_plane->scaling_quality, &curr_pipe->plane_state->scaling_quality,
sizeof(phantom_plane->scaling_quality));
@@ -880,6 +906,11 @@ bool dml2_svp_add_phantom_pipe_to_dc_state(struct dml2_context *ctx, struct dc_s
if (ctx->config.svp_pstate.force_disable_subvp)
return false;
+ if (!state) {
+ ASSERT(0);
+ return false;
+ }
+
if (!all_pipes_have_stream_and_plane(ctx, state))
return false;
@@ -898,6 +929,10 @@ bool dml2_svp_add_phantom_pipe_to_dc_state(struct dml2_context *ctx, struct dc_s
}
if (enough_pipes_for_subvp(ctx, state) && assign_subvp_pipe(ctx, state, &dc_pipe_idx)) {
+ if (state->res_ctx.pipe_ctx[dc_pipe_idx].stream == NULL) {
+ ASSERT(0);
+ return false;
+ }
dml_pipe_idx = dml2_helper_find_dml_pipe_idx_by_stream_id(ctx, state->res_ctx.pipe_ctx[dc_pipe_idx].stream->stream_id);
svp_height = mode_support_info->SubViewportLinesNeededInMALL[dml_pipe_idx];
vstartup = dml_get_vstartup_calculated(&ctx->v20.dml_core_ctx, dml_pipe_idx);
diff --git a/drivers/gpu/drm/amd/display/dc/dml2_0/dml2_wrapper.c b/drivers/gpu/drm/amd/display/dc/dml2_0/dml2_wrapper.c
index 307186eb6af0..408559d6fb2d 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2_0/dml2_wrapper.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2_0/dml2_wrapper.c
@@ -84,8 +84,9 @@ static void dml2_init(const struct dc *in_dc, const struct dml2_configuration_op
bool dml2_create(const struct dc *in_dc, const struct dml2_configuration_options *config, struct dml2_context **dml2)
{
// TODO : Temporarily add DCN_VERSION_3_2 for N-1 validation. Remove DCN_VERSION_3_2 after N-1 validation phase is complete.
- if ((in_dc->debug.using_dml21) && (in_dc->ctx->dce_version >= DCN_VERSION_4_01))
+ if ((in_dc->debug.using_dml21) && (in_dc->ctx->dce_version >= DCN_VERSION_4_01)) {
return dml21_create(in_dc, dml2, config);
+ }
// Allocate Mode Lib Ctx
*dml2 = dml2_allocate_memory();
diff --git a/drivers/gpu/drm/amd/display/dc/dpp/dcn10/dcn10_dpp.c b/drivers/gpu/drm/amd/display/dc/dpp/dcn10/dcn10_dpp.c
index ce91e5d28956..0e70ffc784b1 100644
--- a/drivers/gpu/drm/amd/display/dc/dpp/dcn10/dcn10_dpp.c
+++ b/drivers/gpu/drm/amd/display/dc/dpp/dcn10/dcn10_dpp.c
@@ -172,10 +172,14 @@ bool dpp1_get_optimal_number_of_taps(
scl_data->taps.h_taps_c = in_taps->h_taps_c;
if (!dpp->ctx->dc->debug.always_scale) {
- if (IDENTITY_RATIO(scl_data->ratios.horz))
+ if (IDENTITY_RATIO(scl_data->ratios.horz)) {
scl_data->taps.h_taps = 1;
- if (IDENTITY_RATIO(scl_data->ratios.vert))
+ scl_data->taps.h_taps_c = 1;
+ }
+ if (IDENTITY_RATIO(scl_data->ratios.vert)) {
scl_data->taps.v_taps = 1;
+ scl_data->taps.v_taps_c = 1;
+ }
if (IDENTITY_RATIO(scl_data->ratios.horz_c))
scl_data->taps.h_taps_c = 1;
if (IDENTITY_RATIO(scl_data->ratios.vert_c))
diff --git a/drivers/gpu/drm/amd/display/dc/dpp/dcn30/dcn30_dpp.c b/drivers/gpu/drm/amd/display/dc/dpp/dcn30/dcn30_dpp.c
index c7923531da83..8a146968ee15 100644
--- a/drivers/gpu/drm/amd/display/dc/dpp/dcn30/dcn30_dpp.c
+++ b/drivers/gpu/drm/amd/display/dc/dpp/dcn30/dcn30_dpp.c
@@ -524,10 +524,14 @@ bool dpp3_get_optimal_number_of_taps(
scl_data->taps.v_taps_c = max_taps_c;
if (!dpp->ctx->dc->debug.always_scale) {
- if (IDENTITY_RATIO(scl_data->ratios.horz))
+ if (IDENTITY_RATIO(scl_data->ratios.horz)) {
scl_data->taps.h_taps = 1;
- if (IDENTITY_RATIO(scl_data->ratios.vert))
+ scl_data->taps.h_taps_c = 1;
+ }
+ if (IDENTITY_RATIO(scl_data->ratios.vert)) {
scl_data->taps.v_taps = 1;
+ scl_data->taps.v_taps_c = 1;
+ }
if (IDENTITY_RATIO(scl_data->ratios.horz_c))
scl_data->taps.h_taps_c = 1;
if (IDENTITY_RATIO(scl_data->ratios.vert_c))
diff --git a/drivers/gpu/drm/amd/display/dc/dpp/dcn30/dcn30_dpp_cm.c b/drivers/gpu/drm/amd/display/dc/dpp/dcn30/dcn30_dpp_cm.c
index 82eca0e7b7d0..3284084ca7ad 100644
--- a/drivers/gpu/drm/amd/display/dc/dpp/dcn30/dcn30_dpp_cm.c
+++ b/drivers/gpu/drm/amd/display/dc/dpp/dcn30/dcn30_dpp_cm.c
@@ -132,6 +132,8 @@ static void dpp3_power_on_gamcor_lut(
if (dpp_base->ctx->dc->debug.enable_mem_low_power.bits.cm) {
if (power_on) {
REG_UPDATE(CM_MEM_PWR_CTRL, GAMCOR_MEM_PWR_FORCE, 0);
+ if (dpp_base->ctx->dc->caps.ips_v2_support)
+ REG_UPDATE(CM_MEM_PWR_CTRL, GAMCOR_MEM_PWR_DIS, 1);
REG_WAIT(CM_MEM_PWR_STATUS, GAMCOR_MEM_PWR_STATE, 0, 1, 5);
} else {
dpp_base->ctx->dc->optimized_required = true;
diff --git a/drivers/gpu/drm/amd/display/dc/dpp/dcn401/dcn401_dpp_dscl.c b/drivers/gpu/drm/amd/display/dc/dpp/dcn401/dcn401_dpp_dscl.c
index a62c4733ed3b..8b6155f9122f 100644
--- a/drivers/gpu/drm/amd/display/dc/dpp/dcn401/dcn401_dpp_dscl.c
+++ b/drivers/gpu/drm/amd/display/dc/dpp/dcn401/dcn401_dpp_dscl.c
@@ -155,7 +155,12 @@ static void dpp401_power_on_dscl(
if (dpp->tf_regs->DSCL_MEM_PWR_CTRL) {
if (power_on) {
REG_UPDATE(DSCL_MEM_PWR_CTRL, LUT_MEM_PWR_FORCE, 0);
- REG_WAIT(DSCL_MEM_PWR_STATUS, LUT_MEM_PWR_STATE, 0, 1, 5);
+ if (dpp->base.ctx->dc->caps.ips_v2_support) {
+ /* HW default is light sleep (LS); must wake the LUT memory before programming */
+ REG_UPDATE(DSCL_MEM_PWR_CTRL, LUT_MEM_PWR_DIS, 1);
+ REG_WAIT(DSCL_MEM_PWR_STATUS, LUT_MEM_PWR_STATE, 0, 1, 100);
+ } else
+ REG_WAIT(DSCL_MEM_PWR_STATUS, LUT_MEM_PWR_STATE, 0, 1, 5);
} else {
if (dpp->base.ctx->dc->debug.enable_mem_low_power.bits.dscl) {
dpp->base.ctx->dc->optimized_required = true;
@@ -956,6 +961,15 @@ static void dpp401_dscl_program_isharp(struct dpp *dpp_base,
*bs_coeffs_updated = false;
PERF_TRACE();
+ /*power on isharp_delta_mem first*/
+ if (dpp_base->ctx->dc->caps.ips_v2_support) {
+ /*HW default is LS, need to wake up*/
+ REG_UPDATE_2(ISHARP_DELTA_LUT_MEM_PWR_CTRL,
+ ISHARP_DELTA_LUT_MEM_PWR_FORCE, 0,
+ ISHARP_DELTA_LUT_MEM_PWR_DIS, 1);
+ REG_WAIT(ISHARP_DELTA_LUT_MEM_PWR_CTRL,
+ ISHARP_DELTA_LUT_MEM_PWR_STATE, 0, 1, 100);
+ }
/* ISHARP_MODE */
REG_SET_6(ISHARP_MODE, 0,
ISHARP_EN, scl_data->dscl_prog_data.isharp_en,
@@ -1033,6 +1047,13 @@ static void dpp401_dscl_program_isharp(struct dpp *dpp_base,
}
}
+ /* programming done: release isharp_delta_mem so it can re-enter light sleep */
+ if (dpp_base->ctx->dc->caps.ips_v2_support) {
+ /* clear both FORCE and DIS to return power management to HW control */
+ REG_UPDATE_SEQ_2(ISHARP_DELTA_LUT_MEM_PWR_CTRL,
+ ISHARP_DELTA_LUT_MEM_PWR_FORCE, 0,
+ ISHARP_DELTA_LUT_MEM_PWR_DIS, 0);
+ }
PERF_TRACE();
} // dpp401_dscl_program_isharp
/**
diff --git a/drivers/gpu/drm/amd/display/dc/hpo/dcn31/dcn31_hpo_dp_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/hpo/dcn31/dcn31_hpo_dp_stream_encoder.c
index 759b453385c4..92ed130aeaec 100644
--- a/drivers/gpu/drm/amd/display/dc/hpo/dcn31/dcn31_hpo_dp_stream_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/hpo/dcn31/dcn31_hpo_dp_stream_encoder.c
@@ -640,6 +640,11 @@ static void dcn31_hpo_dp_stream_enc_audio_setup(
REG_UPDATE(DP_STREAM_ENC_AUDIO_CONTROL,
DP_STREAM_ENC_INPUT_MUX_AUDIO_STREAM_SOURCE_SEL, az_inst);
+ if (enc3->hpo_se_mask->DP_STREAM_ENC_APG_CLOCK_EN) {
+ /*enable apg clk*/
+ REG_UPDATE(DP_STREAM_ENC_AUDIO_CONTROL,
+ DP_STREAM_ENC_APG_CLOCK_EN, 1);
+ }
ASSERT(enc->apg);
enc->apg->funcs->se_audio_setup(enc->apg, az_inst, info);
}
diff --git a/drivers/gpu/drm/amd/display/dc/hubbub/dcn401/dcn401_hubbub.c b/drivers/gpu/drm/amd/display/dc/hubbub/dcn401/dcn401_hubbub.c
index 941dce439e97..b0a4b68cf359 100644
--- a/drivers/gpu/drm/amd/display/dc/hubbub/dcn401/dcn401_hubbub.c
+++ b/drivers/gpu/drm/amd/display/dc/hubbub/dcn401/dcn401_hubbub.c
@@ -1151,8 +1151,6 @@ void dcn401_program_compbuf_segments(struct hubbub *hubbub, unsigned compbuf_siz
{
struct dcn20_hubbub *hubbub2 = TO_DCN20_HUBBUB(hubbub);
- unsigned int cur_compbuf_size_seg = 0;
-
if (safe_to_increase || compbuf_size_seg <= hubbub2->compbuf_size_segments) {
if (compbuf_size_seg > hubbub2->compbuf_size_segments) {
REG_WAIT(DCHUBBUB_DET0_CTRL, DET0_SIZE_CURRENT, hubbub2->det0_size, 1, 100);
@@ -1165,8 +1163,6 @@ void dcn401_program_compbuf_segments(struct hubbub *hubbub, unsigned compbuf_siz
+ hubbub2->det3_size + compbuf_size_seg <= hubbub2->crb_size_segs);
REG_UPDATE(DCHUBBUB_COMPBUF_CTRL, COMPBUF_SIZE, compbuf_size_seg);
hubbub2->compbuf_size_segments = compbuf_size_seg;
-
- ASSERT(REG_GET(DCHUBBUB_COMPBUF_CTRL, CONFIG_ERROR, &cur_compbuf_size_seg) && !cur_compbuf_size_seg);
}
}
diff --git a/drivers/gpu/drm/amd/display/dc/hubp/dcn401/dcn401_hubp.c b/drivers/gpu/drm/amd/display/dc/hubp/dcn401/dcn401_hubp.c
index c205500290ec..4985e885952d 100644
--- a/drivers/gpu/drm/amd/display/dc/hubp/dcn401/dcn401_hubp.c
+++ b/drivers/gpu/drm/amd/display/dc/hubp/dcn401/dcn401_hubp.c
@@ -41,12 +41,12 @@
hubp2->hubp_shift->field_name, hubp2->hubp_mask->field_name
void hubp401_program_3dlut_fl_addr(struct hubp *hubp,
- const struct dc_plane_address address)
+ const struct dc_plane_address *address)
{
struct dcn20_hubp *hubp2 = TO_DCN20_HUBP(hubp);
- REG_UPDATE(HUBP_3DLUT_ADDRESS_HIGH, HUBP_3DLUT_ADDRESS_HIGH, address.lut3d.addr.high_part);
- REG_WRITE(HUBP_3DLUT_ADDRESS_LOW, address.lut3d.addr.low_part);
+ REG_UPDATE(HUBP_3DLUT_ADDRESS_HIGH, HUBP_3DLUT_ADDRESS_HIGH, address->lut3d.addr.high_part);
+ REG_WRITE(HUBP_3DLUT_ADDRESS_LOW, address->lut3d.addr.low_part);
}
void hubp401_program_3dlut_fl_dlg_param(struct hubp *hubp, int refcyc_per_3dlut_group)
@@ -72,33 +72,46 @@ int hubp401_get_3dlut_fl_done(struct hubp *hubp)
return ret;
}
-void hubp401_program_3dlut_fl_addressing_mode(struct hubp *hubp, enum hubp_3dlut_fl_addressing_mode addr_mode)
+static void hubp401_get_3dlut_fl_xbar_map(
+ const enum dc_cm_lut_pixel_format format,
+ enum hubp_3dlut_fl_crossbar_bit_slice *bit_slice_y_g,
+ enum hubp_3dlut_fl_crossbar_bit_slice *bit_slice_cb_b,
+ enum hubp_3dlut_fl_crossbar_bit_slice *bit_slice_cr_r)
{
- struct dcn20_hubp *hubp2 = TO_DCN20_HUBP(hubp);
-
- REG_UPDATE(HUBP_3DLUT_CONTROL, HUBP_3DLUT_ADDRESSING_MODE, addr_mode);
-}
-
-void hubp401_program_3dlut_fl_width(struct hubp *hubp, enum hubp_3dlut_fl_width width)
-{
- struct dcn20_hubp *hubp2 = TO_DCN20_HUBP(hubp);
-
- REG_UPDATE(HUBP_3DLUT_CONTROL, HUBP_3DLUT_WIDTH, width);
+ switch (format) {
+ case CM_LUT_PIXEL_FORMAT_BGRA16161616_UNORM_12MSB:
+ case CM_LUT_PIXEL_FORMAT_BGRA16161616_UNORM_12LSB:
+ case CM_LUT_PIXEL_FORMAT_BGRA16161616_FLOAT_FP1_5_10:
+ /* BGRA */
+ *bit_slice_cr_r = hubp_3dlut_fl_crossbar_bit_slice_32_47;
+ *bit_slice_y_g = hubp_3dlut_fl_crossbar_bit_slice_16_31;
+ *bit_slice_cb_b = hubp_3dlut_fl_crossbar_bit_slice_0_15;
+ break;
+ case CM_LUT_PIXEL_FORMAT_RGBA16161616_UNORM_12MSB:
+ case CM_LUT_PIXEL_FORMAT_RGBA16161616_UNORM_12LSB:
+ case CM_LUT_PIXEL_FORMAT_RGBA16161616_FLOAT_FP1_5_10:
+ default:
+ /* RGBA */
+ *bit_slice_cr_r = hubp_3dlut_fl_crossbar_bit_slice_0_15;
+ *bit_slice_y_g = hubp_3dlut_fl_crossbar_bit_slice_16_31;
+ *bit_slice_cb_b = hubp_3dlut_fl_crossbar_bit_slice_32_47;
+ break;
+ }
}
-void hubp401_program_3dlut_fl_tmz_protected(struct hubp *hubp, uint8_t protection_bits)
+void hubp401_program_3dlut_fl_crossbar(struct hubp *hubp,
+ const enum dc_cm_lut_pixel_format format)
{
struct dcn20_hubp *hubp2 = TO_DCN20_HUBP(hubp);
- REG_UPDATE(HUBP_3DLUT_CONTROL, HUBP_3DLUT_TMZ, protection_bits);
-}
+ enum hubp_3dlut_fl_crossbar_bit_slice bit_slice_y_g = 0;
+ enum hubp_3dlut_fl_crossbar_bit_slice bit_slice_cb_b = 0;
+ enum hubp_3dlut_fl_crossbar_bit_slice bit_slice_cr_r = 0;
-void hubp401_program_3dlut_fl_crossbar(struct hubp *hubp,
- enum hubp_3dlut_fl_crossbar_bit_slice bit_slice_y_g,
- enum hubp_3dlut_fl_crossbar_bit_slice bit_slice_cb_b,
- enum hubp_3dlut_fl_crossbar_bit_slice bit_slice_cr_r)
-{
- struct dcn20_hubp *hubp2 = TO_DCN20_HUBP(hubp);
+ hubp401_get_3dlut_fl_xbar_map(format,
+ &bit_slice_y_g,
+ &bit_slice_cb_b,
+ &bit_slice_cr_r);
REG_UPDATE_3(HUBP_3DLUT_CONTROL,
HUBP_3DLUT_CROSSBAR_SELECT_Y_G, bit_slice_y_g,
@@ -106,62 +119,122 @@ void hubp401_program_3dlut_fl_crossbar(struct hubp *hubp,
HUBP_3DLUT_CROSSBAR_SELECT_CR_R, bit_slice_cr_r);
}
-void hubp401_update_3dlut_fl_bias_scale(struct hubp *hubp, uint16_t bias, uint16_t scale)
+static enum hubp_3dlut_fl_width hubp401_get_3dlut_fl_width(
+ const enum dc_cm_lut_size size,
+ const enum dc_cm_lut_swizzle swizzle)
{
- struct dcn20_hubp *hubp2 = TO_DCN20_HUBP(hubp);
+ enum hubp_3dlut_fl_width width = 0;
+
+ switch (size) {
+ case CM_LUT_SIZE_333333:
+ ASSERT(swizzle != CM_LUT_1D_PACKED_LINEAR);
+ width = hubp_3dlut_fl_width_33;
+ break;
+ case CM_LUT_SIZE_171717:
+ if (swizzle != CM_LUT_1D_PACKED_LINEAR) {
+ width = hubp_3dlut_fl_width_17;
+ } else {
+ width = hubp_3dlut_fl_width_17_transformed;
+ }
+ break;
+ default:
+ width = 0;
+ break;
+ }
- REG_UPDATE_2(_3DLUT_FL_BIAS_SCALE, HUBP0_3DLUT_FL_BIAS, bias, HUBP0_3DLUT_FL_SCALE, scale);
+ return width;
}
-void hubp401_program_3dlut_fl_mode(struct hubp *hubp, enum hubp_3dlut_fl_mode mode)
+static enum hubp_3dlut_fl_format hubp401_get_3dlut_fl_format(
+ const enum dc_cm_lut_pixel_format dc_format)
{
- struct dcn20_hubp *hubp2 = TO_DCN20_HUBP(hubp);
+ enum hubp_3dlut_fl_format hubp_format = hubp_3dlut_fl_format_unorm_12msb_bitslice;
- REG_UPDATE(_3DLUT_FL_CONFIG, HUBP0_3DLUT_FL_MODE, mode);
+ switch (dc_format) {
+ case CM_LUT_PIXEL_FORMAT_RGBA16161616_UNORM_12MSB:
+ case CM_LUT_PIXEL_FORMAT_BGRA16161616_UNORM_12MSB:
+ hubp_format = hubp_3dlut_fl_format_unorm_12msb_bitslice;
+ break;
+ case CM_LUT_PIXEL_FORMAT_RGBA16161616_UNORM_12LSB:
+ case CM_LUT_PIXEL_FORMAT_BGRA16161616_UNORM_12LSB:
+ hubp_format = hubp_3dlut_fl_format_unorm_12lsb_bitslice;
+ break;
+ case CM_LUT_PIXEL_FORMAT_RGBA16161616_FLOAT_FP1_5_10:
+ case CM_LUT_PIXEL_FORMAT_BGRA16161616_FLOAT_FP1_5_10:
+ hubp_format = hubp_3dlut_fl_format_float_fp1_5_10;
+ break;
+ default:
+ BREAK_TO_DEBUGGER();
+ break;
+ }
+
+ return hubp_format;
}
-void hubp401_program_3dlut_fl_format(struct hubp *hubp, enum hubp_3dlut_fl_format format)
+static enum hubp_3dlut_fl_addressing_mode hubp401_get_3dlut_fl_addr_mode(
+ const enum dc_cm_lut_swizzle swizzle)
{
- struct dcn20_hubp *hubp2 = TO_DCN20_HUBP(hubp);
+ enum hubp_3dlut_fl_addressing_mode addr_mode;
+
+ switch (swizzle) {
+ case CM_LUT_1D_PACKED_LINEAR:
+ addr_mode = hubp_3dlut_fl_addressing_mode_simple_linear;
+ break;
+ case CM_LUT_3D_SWIZZLE_LINEAR_RGB:
+ case CM_LUT_3D_SWIZZLE_LINEAR_BGR:
+ default:
+ addr_mode = hubp_3dlut_fl_addressing_mode_sw_linear;
+ break;
+ }
- REG_UPDATE(_3DLUT_FL_CONFIG, HUBP0_3DLUT_FL_FORMAT, format);
+ return addr_mode;
}
-void hubp401_program_3dlut_fl_config(
- struct hubp *hubp,
- struct hubp_fl_3dlut_config *cfg)
+static enum hubp_3dlut_fl_mode hubp401_get_3dlut_fl_mode(
+ const enum dc_cm_lut_swizzle swizzle)
{
- struct dcn20_hubp *hubp2 = TO_DCN20_HUBP(hubp);
+ enum hubp_3dlut_fl_mode mode;
- uint32_t mpc_width = {(cfg->width == 17) ? 0 : 1};
- uint32_t width = {cfg->width};
+ switch (swizzle) {
+ case CM_LUT_3D_SWIZZLE_LINEAR_RGB:
+ mode = hubp_3dlut_fl_mode_native_1;
+ break;
+ case CM_LUT_3D_SWIZZLE_LINEAR_BGR:
+ mode = hubp_3dlut_fl_mode_native_2;
+ break;
+ case CM_LUT_1D_PACKED_LINEAR:
+ mode = hubp_3dlut_fl_mode_transform;
+ break;
+ default:
+ mode = hubp_3dlut_fl_mode_disable;
+ break;
+ }
- if (cfg->layout == DC_CM2_GPU_MEM_LAYOUT_1D_PACKED_LINEAR)
- width = (cfg->width == 17) ? 4916 : 35940;
+ return mode;
+}
+
+void hubp401_program_3dlut_fl_config(struct hubp *hubp,
+ const struct dc_3dlut_dma *config)
+{
+ struct dcn20_hubp *hubp2 = TO_DCN20_HUBP(hubp);
+
+ enum hubp_3dlut_fl_width width = hubp401_get_3dlut_fl_width(config->size, config->swizzle);
+ enum hubp_3dlut_fl_format format = hubp401_get_3dlut_fl_format(config->format);
+ enum hubp_3dlut_fl_addressing_mode addr_mode = hubp401_get_3dlut_fl_addr_mode(config->swizzle);
+ enum hubp_3dlut_fl_mode mode = hubp401_get_3dlut_fl_mode(config->swizzle);
REG_UPDATE_2(_3DLUT_FL_CONFIG,
- HUBP0_3DLUT_FL_MODE, cfg->mode,
- HUBP0_3DLUT_FL_FORMAT, cfg->format);
+ HUBP0_3DLUT_FL_MODE, mode,
+ HUBP0_3DLUT_FL_FORMAT, format);
REG_UPDATE_2(_3DLUT_FL_BIAS_SCALE,
- HUBP0_3DLUT_FL_BIAS, cfg->bias,
- HUBP0_3DLUT_FL_SCALE, cfg->scale);
-
- REG_UPDATE(HUBP_3DLUT_ADDRESS_HIGH,
- HUBP_3DLUT_ADDRESS_HIGH, cfg->address.lut3d.addr.high_part);
- REG_UPDATE(HUBP_3DLUT_ADDRESS_LOW,
- HUBP_3DLUT_ADDRESS_LOW, cfg->address.lut3d.addr.low_part);
-
- //cross bar
- REG_UPDATE_8(HUBP_3DLUT_CONTROL,
- HUBP_3DLUT_MPC_WIDTH, mpc_width,
- HUBP_3DLUT_WIDTH, width,
- HUBP_3DLUT_CROSSBAR_SELECT_CR_R, cfg->crossbar_bit_slice_cr_r,
- HUBP_3DLUT_CROSSBAR_SELECT_Y_G, cfg->crossbar_bit_slice_y_g,
- HUBP_3DLUT_CROSSBAR_SELECT_CB_B, cfg->crossbar_bit_slice_cb_b,
- HUBP_3DLUT_ADDRESSING_MODE, cfg->addr_mode,
- HUBP_3DLUT_TMZ, cfg->protection_bits,
- HUBP_3DLUT_ENABLE, cfg->enabled ? 1 : 0);
+ HUBP0_3DLUT_FL_BIAS, config->bias,
+ HUBP0_3DLUT_FL_SCALE, config->scale);
+
+ REG_UPDATE_3(HUBP_3DLUT_CONTROL,
+ HUBP_3DLUT_WIDTH, width,
+ HUBP_3DLUT_ADDRESSING_MODE, addr_mode,
+ HUBP_3DLUT_TMZ, config->addr.tmz_surface);
}
void hubp401_update_mall_sel(struct hubp *hubp, uint32_t mall_sel, bool c_cursor)
@@ -1058,19 +1131,13 @@ static struct hubp_funcs dcn401_hubp_funcs = {
.hubp_update_mall_sel = hubp401_update_mall_sel,
.hubp_prepare_subvp_buffering = hubp32_prepare_subvp_buffering,
.hubp_program_mcache_id_and_split_coordinate = hubp401_program_mcache_id_and_split_coordinate,
- .hubp_update_3dlut_fl_bias_scale = hubp401_update_3dlut_fl_bias_scale,
- .hubp_program_3dlut_fl_mode = hubp401_program_3dlut_fl_mode,
- .hubp_program_3dlut_fl_format = hubp401_program_3dlut_fl_format,
.hubp_program_3dlut_fl_addr = hubp401_program_3dlut_fl_addr,
+ .hubp_program_3dlut_fl_config = hubp401_program_3dlut_fl_config,
.hubp_program_3dlut_fl_dlg_param = hubp401_program_3dlut_fl_dlg_param,
.hubp_enable_3dlut_fl = hubp401_enable_3dlut_fl,
- .hubp_program_3dlut_fl_addressing_mode = hubp401_program_3dlut_fl_addressing_mode,
- .hubp_program_3dlut_fl_width = hubp401_program_3dlut_fl_width,
- .hubp_program_3dlut_fl_tmz_protected = hubp401_program_3dlut_fl_tmz_protected,
.hubp_program_3dlut_fl_crossbar = hubp401_program_3dlut_fl_crossbar,
.hubp_get_3dlut_fl_done = hubp401_get_3dlut_fl_done,
.hubp_clear_tiling = hubp401_clear_tiling,
- .hubp_program_3dlut_fl_config = hubp401_program_3dlut_fl_config,
.hubp_read_reg_state = hubp3_read_reg_state
};
diff --git a/drivers/gpu/drm/amd/display/dc/hubp/dcn401/dcn401_hubp.h b/drivers/gpu/drm/amd/display/dc/hubp/dcn401/dcn401_hubp.h
index 4570b8016de5..043948f64b86 100644
--- a/drivers/gpu/drm/amd/display/dc/hubp/dcn401/dcn401_hubp.h
+++ b/drivers/gpu/drm/amd/display/dc/hubp/dcn401/dcn401_hubp.h
@@ -328,32 +328,17 @@ int hubp401_get_3dlut_fl_done(struct hubp *hubp);
void hubp401_set_unbounded_requesting(struct hubp *hubp, bool enable);
-void hubp401_update_3dlut_fl_bias_scale(struct hubp *hubp, uint16_t bias, uint16_t scale);
-
void hubp401_program_3dlut_fl_crossbar(struct hubp *hubp,
- enum hubp_3dlut_fl_crossbar_bit_slice bit_slice_y_g,
- enum hubp_3dlut_fl_crossbar_bit_slice bit_slice_cb_b,
- enum hubp_3dlut_fl_crossbar_bit_slice bit_slice_cr_r);
-
-void hubp401_program_3dlut_fl_tmz_protected(struct hubp *hubp, uint8_t protection_bits);
-
-void hubp401_program_3dlut_fl_width(struct hubp *hubp, enum hubp_3dlut_fl_width width);
-
-void hubp401_program_3dlut_fl_addressing_mode(struct hubp *hubp, enum hubp_3dlut_fl_addressing_mode addr_mode);
+ const enum dc_cm_lut_pixel_format format);
void hubp401_enable_3dlut_fl(struct hubp *hubp, bool enable);
void hubp401_program_3dlut_fl_dlg_param(struct hubp *hubp, int refcyc_per_3dlut_group);
-void hubp401_program_3dlut_fl_addr(struct hubp *hubp, const struct dc_plane_address address);
+void hubp401_program_3dlut_fl_addr(struct hubp *hubp, const struct dc_plane_address *address);
-void hubp401_program_3dlut_fl_format(struct hubp *hubp, enum hubp_3dlut_fl_format format);
-
-void hubp401_program_3dlut_fl_mode(struct hubp *hubp, enum hubp_3dlut_fl_mode mode);
-
-void hubp401_program_3dlut_fl_config(
- struct hubp *hubp,
- struct hubp_fl_3dlut_config *cfg);
+void hubp401_program_3dlut_fl_config(struct hubp *hubp,
+ const struct dc_3dlut_dma *config);
void hubp401_clear_tiling(struct hubp *hubp);
diff --git a/drivers/gpu/drm/amd/display/dc/hubp/dcn42/dcn42_hubp.c b/drivers/gpu/drm/amd/display/dc/hubp/dcn42/dcn42_hubp.c
index 07c38dc03960..0e33c739f459 100644
--- a/drivers/gpu/drm/amd/display/dc/hubp/dcn42/dcn42_hubp.c
+++ b/drivers/gpu/drm/amd/display/dc/hubp/dcn42/dcn42_hubp.c
@@ -147,13 +147,16 @@ static void hubp42_program_pixel_format(
/* don't see the need of program the xbar in DCN 1.0 */
}
-void hubp42_program_deadline(
+static void hubp42_program_deadline(
struct hubp *hubp,
struct dml2_display_dlg_regs *dlg_attr,
struct dml2_display_ttu_regs *ttu_attr)
{
struct dcn20_hubp *hubp2 = TO_DCN20_HUBP(hubp);
+ /* put DLG in mission mode */
+ REG_WRITE(HUBPREQ_DEBUG_DB, 0);
+
/* DLG - Per hubp */
REG_SET_2(BLANK_OFFSET_0, 0,
REFCYC_H_BLANK_END, dlg_attr->refcyc_h_blank_end,
@@ -274,19 +277,84 @@ static void hubp42_program_surface_config(
hubp42_program_pixel_format(hubp, format);
}
+static void hubp42_get_3dlut_fl_xbar_map(
+ const enum dc_cm_lut_pixel_format format,
+ enum hubp_3dlut_fl_crossbar_bit_slice *bit_slice_y_g,
+ enum hubp_3dlut_fl_crossbar_bit_slice *bit_slice_cb_b,
+ enum hubp_3dlut_fl_crossbar_bit_slice *bit_slice_cr_r)
+{
+ switch (format) {
+ case CM_LUT_PIXEL_FORMAT_BGRA16161616_UNORM_12MSB:
+ case CM_LUT_PIXEL_FORMAT_BGRA16161616_UNORM_12LSB:
+ case CM_LUT_PIXEL_FORMAT_BGRA16161616_FLOAT_FP1_5_10:
+ /* BGRA */
+ *bit_slice_cr_r = hubp_3dlut_fl_crossbar_bit_slice_32_47;
+ *bit_slice_y_g = hubp_3dlut_fl_crossbar_bit_slice_16_31;
+ *bit_slice_cb_b = hubp_3dlut_fl_crossbar_bit_slice_0_15;
+ break;
+ case CM_LUT_PIXEL_FORMAT_RGBA16161616_UNORM_12MSB:
+ case CM_LUT_PIXEL_FORMAT_RGBA16161616_UNORM_12LSB:
+ case CM_LUT_PIXEL_FORMAT_RGBA16161616_FLOAT_FP1_5_10:
+ default:
+ /* RGBA */
+ *bit_slice_cr_r = hubp_3dlut_fl_crossbar_bit_slice_0_15;
+ *bit_slice_y_g = hubp_3dlut_fl_crossbar_bit_slice_16_31;
+ *bit_slice_cb_b = hubp_3dlut_fl_crossbar_bit_slice_32_47;
+ break;
+ }
+}
+
void hubp42_program_3dlut_fl_crossbar(struct hubp *hubp,
- enum hubp_3dlut_fl_crossbar_bit_slice bit_slice_r,
- enum hubp_3dlut_fl_crossbar_bit_slice bit_slice_g,
- enum hubp_3dlut_fl_crossbar_bit_slice bit_slice_b)
+ const enum dc_cm_lut_pixel_format format)
{
struct dcn20_hubp *hubp2 = TO_DCN20_HUBP(hubp);
+ enum hubp_3dlut_fl_crossbar_bit_slice bit_slice_g = 0;
+ enum hubp_3dlut_fl_crossbar_bit_slice bit_slice_b = 0;
+ enum hubp_3dlut_fl_crossbar_bit_slice bit_slice_r = 0;
+
+ hubp42_get_3dlut_fl_xbar_map(format,
+ &bit_slice_g,
+ &bit_slice_b,
+ &bit_slice_r);
+
REG_UPDATE_3(HUBP_3DLUT_CONTROL,
HUBP_3DLUT_CROSSBAR_SEL_R, bit_slice_r,
HUBP_3DLUT_CROSSBAR_SEL_G, bit_slice_g,
HUBP_3DLUT_CROSSBAR_SEL_B, bit_slice_b);
}
+static uint32_t hubp42_get_3dlut_fl_mpc_width(
+ const enum dc_cm_lut_size size)
+{
+ uint32_t width = 0;
+
+ switch (size) {
+ case CM_LUT_SIZE_333333:
+ width = 1;
+ break;
+ case CM_LUT_SIZE_171717:
+ default:
+ width = 0;
+ break;
+ }
+
+ return width;
+}
+
+void hubp42_program_3dlut_fl_config(struct hubp *hubp,
+ const struct dc_3dlut_dma *config)
+{
+ struct dcn20_hubp *hubp2 = TO_DCN20_HUBP(hubp);
+
+ uint32_t mpc_width = hubp42_get_3dlut_fl_mpc_width(config->size);
+
+ REG_UPDATE(HUBP_3DLUT_CONTROL,
+ HUBP_3DLUT_MPC_WIDTH, mpc_width);
+
+ hubp401_program_3dlut_fl_config(hubp, config);
+}
+
static bool hubp42_program_surface_flip_and_addr(
struct hubp *hubp,
const struct dc_plane_address *address,
@@ -548,6 +616,7 @@ struct hubp_funcs dcn42_hubp_funcs = {
.hubp_setup_interdependent2 = hubp401_setup_interdependent,
.hubp_set_vm_system_aperture_settings = hubp3_set_vm_system_aperture_settings,
.set_blank = hubp2_set_blank,
+ .set_blank_regs = hubp2_set_blank_regs,
.dcc_control = hubp3_dcc_control,
.hubp_reset = hubp_reset,
.mem_program_viewport = min_set_viewport,
@@ -567,17 +636,12 @@
.hubp_set_flip_int = hubp1_set_flip_int,
.hubp_in_blank = hubp1_in_blank,
.program_extended_blank = hubp31_program_extended_blank_value,
- .hubp_update_3dlut_fl_bias_scale = hubp401_update_3dlut_fl_bias_scale,
- .hubp_program_3dlut_fl_mode = hubp401_program_3dlut_fl_mode,
- .hubp_program_3dlut_fl_format = hubp401_program_3dlut_fl_format,
.hubp_program_3dlut_fl_addr = hubp401_program_3dlut_fl_addr,
+ .hubp_program_3dlut_fl_config = hubp42_program_3dlut_fl_config,
.hubp_program_3dlut_fl_dlg_param = hubp401_program_3dlut_fl_dlg_param,
.hubp_enable_3dlut_fl = hubp401_enable_3dlut_fl,
- .hubp_program_3dlut_fl_addressing_mode = hubp401_program_3dlut_fl_addressing_mode,
- .hubp_program_3dlut_fl_width = hubp401_program_3dlut_fl_width,
- .hubp_program_3dlut_fl_tmz_protected = hubp401_program_3dlut_fl_tmz_protected,
.hubp_program_3dlut_fl_crossbar = hubp42_program_3dlut_fl_crossbar,
.hubp_get_3dlut_fl_done = hubp401_get_3dlut_fl_done,
+ .hubp_clear_tiling = hubp3_clear_tiling,
- .hubp_program_3dlut_fl_config = hubp401_program_3dlut_fl_config,
.hubp_read_reg_state = hubp3_read_reg_state
};
diff --git a/drivers/gpu/drm/amd/display/dc/hubp/dcn42/dcn42_hubp.h b/drivers/gpu/drm/amd/display/dc/hubp/dcn42/dcn42_hubp.h
index 976614f38981..486c8907413a 100644
--- a/drivers/gpu/drm/amd/display/dc/hubp/dcn42/dcn42_hubp.h
+++ b/drivers/gpu/drm/amd/display/dc/hubp/dcn42/dcn42_hubp.h
@@ -56,11 +56,11 @@ bool hubp42_construct(
const struct dcn_hubp2_shift *hubp_shift,
const struct dcn_hubp2_mask *hubp_mask);
-void hubp42_program_3dlut_fl_crossbar(
- struct hubp *hubp,
- enum hubp_3dlut_fl_crossbar_bit_slice bit_slice_r,
- enum hubp_3dlut_fl_crossbar_bit_slice bit_slice_g,
- enum hubp_3dlut_fl_crossbar_bit_slice bit_slice_b);
+void hubp42_program_3dlut_fl_crossbar(struct hubp *hubp,
+ const enum dc_cm_lut_pixel_format format);
+
+void hubp42_program_3dlut_fl_config(struct hubp *hubp,
+ const struct dc_3dlut_dma *config);
void hubp42_read_state(struct hubp *hubp);
@@ -70,10 +70,4 @@ void hubp42_setup(
union dml2_global_sync_programming *pipe_global_sync,
struct dc_crtc_timing *timing);
-void hubp42_program_deadline(
- struct hubp *hubp,
- struct dml2_display_dlg_regs *dlg_attr,
- struct dml2_display_ttu_regs *ttu_attr);
-
-
#endif /* __DC_HUBP_DCN42_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.c
index 8aafd460c36f..8a17cc036399 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.c
@@ -86,9 +86,9 @@
hws->ctx
#define DC_LOGGER \
- ctx->logger
-#define DC_LOGGER_INIT() \
- struct dc_context *ctx = dc->ctx
+ dc_ctx->logger
+#define DC_LOGGER_INIT(ctx) \
+ struct dc_context *dc_ctx = ctx
#define REG(reg)\
hws->regs->reg
@@ -661,45 +661,16 @@ void dce110_update_info_frame(struct pipe_ctx *pipe_ctx)
}
static void
-dce110_external_encoder_control(enum bp_external_encoder_control_action action,
- struct dc_link *link,
- struct dc_crtc_timing *timing)
+dce110_dac_encoder_control(struct pipe_ctx *pipe_ctx, bool enable)
{
- struct dc *dc = link->ctx->dc;
+ struct dc_link *link = pipe_ctx->stream->link;
struct dc_bios *bios = link->ctx->dc_bios;
- const struct dc_link_settings *link_settings = &link->cur_link_settings;
- enum bp_result bp_result = BP_RESULT_OK;
- struct bp_external_encoder_control ext_cntl = {
- .action = action,
- .connector_obj_id = link->link_enc->connector,
- .encoder_id = link->ext_enc_id,
- .lanes_number = link_settings->lane_count,
- .link_rate = link_settings->link_rate,
-
- /* Use signal type of the real link encoder, ie. DP */
- .signal = link->connector_signal,
-
- /* We don't know the timing yet when executing the SETUP action,
- * so use a reasonably high default value. It seems that ENABLE
- * can change the actual pixel clock but doesn't work with higher
- * pixel clocks than what SETUP was called with.
- */
- .pixel_clock = timing ? timing->pix_clk_100hz / 10 : 300000,
- .color_depth = timing ? timing->display_color_depth : COLOR_DEPTH_888,
- };
- DC_LOGGER_INIT();
-
- bp_result = bios->funcs->external_encoder_control(bios, &ext_cntl);
-
- if (bp_result != BP_RESULT_OK)
- DC_LOG_ERROR("Failed to execute external encoder action: 0x%x\n", action);
-}
+ struct bp_encoder_control encoder_control = {0};
-static void
-dce110_prepare_ddc(struct dc_link *link)
-{
- if (link->ext_enc_id.id)
- dce110_external_encoder_control(EXTERNAL_ENCODER_CONTROL_DDC_SETUP, link, NULL);
+ encoder_control.action = enable ? ENCODER_CONTROL_ENABLE : ENCODER_CONTROL_DISABLE;
+ encoder_control.engine_id = link->link_enc->analog_engine;
+ encoder_control.pixel_clock = pipe_ctx->stream->timing.pix_clk_100hz / 10;
+ bios->funcs->encoder_control(bios, &encoder_control);
}
static bool
@@ -709,8 +680,7 @@ dce110_dac_load_detect(struct dc_link *link)
struct link_encoder *link_enc = link->link_enc;
enum bp_result bp_result;
- bp_result = bios->funcs->dac_load_detection(
- bios, link_enc->analog_engine, link->ext_enc_id);
+ bp_result = bios->funcs->dac_load_detection(bios, link_enc->analog_engine);
return bp_result == BP_RESULT_OK;
}
@@ -726,6 +696,7 @@ void dce110_enable_stream(struct pipe_ctx *pipe_ctx)
uint32_t early_control = 0;
struct timing_generator *tg = pipe_ctx->stream_res.tg;
+ link_hwss->setup_stream_attribute(pipe_ctx);
link_hwss->setup_stream_encoder(pipe_ctx);
dc->hwss.update_info_frame(pipe_ctx);
@@ -744,8 +715,8 @@ void dce110_enable_stream(struct pipe_ctx *pipe_ctx)
tg->funcs->set_early_control(tg, early_control);
- if (link->ext_enc_id.id)
- dce110_external_encoder_control(EXTERNAL_ENCODER_CONTROL_ENABLE, link, timing);
+ if (dc_is_rgb_signal(pipe_ctx->stream->signal))
+ dce110_dac_encoder_control(pipe_ctx, true);
}
static enum bp_result link_transmitter_control(
@@ -767,13 +738,14 @@ void dce110_edp_wait_for_hpd_ready(
struct dc_link *link,
bool power_up)
{
- struct dc_context *ctx = link->ctx;
struct graphics_object_id connector = link->link_enc->connector;
bool edp_hpd_high = false;
uint32_t time_elapsed = 0;
uint32_t timeout = power_up ?
PANEL_POWER_UP_TIMEOUT : PANEL_POWER_DOWN_TIMEOUT;
+ DC_LOGGER_INIT(link->ctx);
+
if (dal_graphics_object_id_get_connector_id(connector)
!= CONNECTOR_ID_EDP) {
BREAK_TO_DEBUGGER();
@@ -825,6 +797,7 @@ void dce110_edp_power_control(
enum bp_result bp_result;
uint8_t pwrseq_instance;
+ DC_LOGGER_INIT(ctx);
if (dal_graphics_object_id_get_connector_id(link->link_enc->connector)
!= CONNECTOR_ID_EDP) {
@@ -993,6 +966,8 @@ void dce110_edp_backlight_control(
unsigned int pre_T11_delay = (link->dpcd_sink_ext_caps.bits.oled ? OLED_PRE_T11_DELAY : 0);
unsigned int post_T7_delay = (link->dpcd_sink_ext_caps.bits.oled ? OLED_POST_T7_DELAY : 0);
+ DC_LOGGER_INIT(ctx);
+
if (dal_graphics_object_id_get_connector_id(link->link_enc->connector)
!= CONNECTOR_ID_EDP) {
BREAK_TO_DEBUGGER();
@@ -1240,8 +1215,8 @@ void dce110_disable_stream(struct pipe_ctx *pipe_ctx)
link_enc->transmitter - TRANSMITTER_UNIPHY_A);
}
- if (link->ext_enc_id.id)
- dce110_external_encoder_control(EXTERNAL_ENCODER_CONTROL_DISABLE, link, NULL);
+ if (dc_is_rgb_signal(pipe_ctx->stream->signal))
+ dce110_dac_encoder_control(pipe_ctx, false);
}
void dce110_unblank_stream(struct pipe_ctx *pipe_ctx,
@@ -1623,6 +1598,22 @@ static enum dc_status dce110_enable_stream_timing(
return DC_OK;
}
+static void
+dce110_select_crtc_source(struct pipe_ctx *pipe_ctx)
+{
+ struct dc_link *link = pipe_ctx->stream->link;
+ struct dc_bios *bios = link->ctx->dc_bios;
+ struct bp_crtc_source_select crtc_source_select = {0};
+ enum engine_id engine_id = link->link_enc->preferred_engine;
+
+ if (dc_is_rgb_signal(pipe_ctx->stream->signal))
+ engine_id = link->link_enc->analog_engine;
+ crtc_source_select.controller_id = CONTROLLER_ID_D0 + pipe_ctx->stream_res.tg->inst;
+ crtc_source_select.color_depth = pipe_ctx->stream->timing.display_color_depth;
+ crtc_source_select.engine_id = engine_id;
+ crtc_source_select.sink_signal = pipe_ctx->stream->signal;
+ bios->funcs->select_crtc_source(bios, &crtc_source_select);
+}
enum dc_status dce110_apply_single_controller_ctx_to_hw(
struct pipe_ctx *pipe_ctx,
@@ -1643,6 +1634,10 @@ enum dc_status dce110_apply_single_controller_ctx_to_hw(
hws->funcs.disable_stream_gating(dc, pipe_ctx);
}
+ if (pipe_ctx->stream->signal == SIGNAL_TYPE_RGB) {
+ dce110_select_crtc_source(pipe_ctx);
+ }
+
if (pipe_ctx->stream_res.audio != NULL) {
struct audio_output audio_output = {0};
@@ -1722,7 +1717,8 @@ enum dc_status dce110_apply_single_controller_ctx_to_hw(
pipe_ctx->stream_res.tg->funcs->set_static_screen_control(
pipe_ctx->stream_res.tg, event_triggers, 2);
- if (!dc_is_virtual_signal(pipe_ctx->stream->signal))
+ if (!dc_is_virtual_signal(pipe_ctx->stream->signal) &&
+ !dc_is_rgb_signal(pipe_ctx->stream->signal))
pipe_ctx->stream_res.stream_enc->funcs->dig_connect_to_otg(
pipe_ctx->stream_res.stream_enc,
pipe_ctx->stream_res.tg->inst);
@@ -1944,6 +1940,35 @@ static void clean_up_dsc_blocks(struct dc *dc)
}
}
+static void dc_hwss_enable_otg_pwa(
+ struct dc *dc,
+ struct pipe_ctx *pipe_ctx)
+{
+ struct timing_generator *tg = NULL;
+
+ if (dc->debug.enable_otg_frame_sync_pwa == 0)
+ return;
+
+ if (pipe_ctx == NULL || pipe_ctx->stream_res.tg == NULL)
+ return;
+ tg = pipe_ctx->stream_res.tg;
+
+ /*only enable this if one active*/
+ if (tg->funcs->enable_otg_pwa) {
+ struct otc_pwa_frame_sync pwa_param = {0};
+
+ DC_LOGGER_INIT(dc->ctx);
+ /* mode 1 to choose generate pwa sync signal on line 0 counting
+ * from vstartup at very beginning of the frame
+ */
+ pwa_param.pwa_frame_sync_line_offset = 0;
+ pwa_param.pwa_sync_mode = DC_OTG_PWA_FRAME_SYNC_MODE_VSTARTUP;
+ /*frame sync line for generating high frame sync*/
+ tg->funcs->enable_otg_pwa(tg, &pwa_param);
+ DC_LOG_DC("Enable OTG PWA frame sync on TG %d\n", tg->inst);
+ }
+}
+
/*
* When ASIC goes from VBIOS/VGA mode to driver/accelerated mode we need:
* 1. Power down all DC HW blocks
@@ -1969,8 +1994,7 @@ void dce110_enable_accelerated_mode(struct dc *dc, struct dc_state *context)
bool keep_edp_vdd_on = false;
bool should_clean_dsc_block = true;
struct dc_bios *dcb = dc->ctx->dc_bios;
- DC_LOGGER_INIT();
-
+ DC_LOGGER_INIT(dc->ctx);
get_edp_links_with_sink(dc, edp_links_with_sink, &edp_with_sink_num);
dc_get_edp_links(dc, edp_links, &edp_num);
@@ -2021,6 +2045,7 @@ void dce110_enable_accelerated_mode(struct dc *dc, struct dc_state *context)
// If VBios supports it, we check it from reigster or other flags.
pipe_ctx->stream_res.pix_clk_params.dio_se_pix_per_cycle = 1;
}
+ dc_hwss_enable_otg_pwa(dc, pipe_ctx);
}
break;
}
@@ -2590,6 +2615,18 @@ enum dc_status dce110_apply_ctx_to_hw(
#endif
}
+
+ if (dc->debug.enable_otg_frame_sync_pwa && context->stream_count == 1) {
+ /* only enable this on one OTG*/
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
+
+ if (pipe_ctx && pipe_ctx->stream != NULL) {
+ dc_hwss_enable_otg_pwa(dc, pipe_ctx);
+ break;
+ }
+ }
+ }
if (dc->fbc_compressor)
enable_fbc(dc, dc->current_state);
@@ -2736,7 +2773,6 @@ static bool wait_for_reset_trigger_to_occur(
struct dc_context *dc_ctx,
struct timing_generator *tg)
{
- struct dc_context *ctx = dc_ctx;
bool rc = false;
/* To avoid endless loop we wait at most
@@ -2778,10 +2814,9 @@ static void dce110_enable_timing_synchronization(
int group_size,
struct pipe_ctx *grouped_pipes[])
{
- struct dc_context *dc_ctx = dc->ctx;
struct dcp_gsl_params gsl_params = { 0 };
int i;
- DC_LOGGER_INIT();
+ DC_LOGGER_INIT(dc->ctx);
DC_SYNC_INFO("GSL: Setting-up...\n");
@@ -2824,10 +2859,9 @@ static void dce110_enable_per_frame_crtc_position_reset(
int group_size,
struct pipe_ctx *grouped_pipes[])
{
- struct dc_context *dc_ctx = dc->ctx;
struct dcp_gsl_params gsl_params = { 0 };
int i;
- DC_LOGGER_INIT();
+ DC_LOGGER_INIT(dc->ctx);
gsl_params.gsl_group = 0;
gsl_params.gsl_master = 0;
@@ -3320,15 +3354,6 @@ void dce110_enable_tmds_link_output(struct dc_link *link,
link->phy_state.symclk_state = SYMCLK_ON_TX_ON;
}
-static void dce110_enable_analog_link_output(
- struct dc_link *link,
- uint32_t pix_clk_100hz)
-{
- link->link_enc->funcs->enable_analog_output(
- link->link_enc,
- pix_clk_100hz);
-}
-
void dce110_enable_dp_link_output(
struct dc_link *link,
const struct link_resource *link_res,
@@ -3376,11 +3401,6 @@ void dce110_enable_dp_link_output(
}
}
- if (link->ext_enc_id.id) {
- dce110_external_encoder_control(EXTERNAL_ENCODER_CONTROL_INIT, link, NULL);
- dce110_external_encoder_control(EXTERNAL_ENCODER_CONTROL_SETUP, link, NULL);
- }
-
if (dc->link_srv->dp_get_encoding_format(link_settings) == DP_8b_10b_ENCODING) {
if (dc->clk_mgr->funcs->notify_link_rate_change)
dc->clk_mgr->funcs->notify_link_rate_change(dc->clk_mgr, link);
@@ -3471,10 +3491,8 @@ static const struct hw_sequencer_funcs dce110_funcs = {
.enable_lvds_link_output = dce110_enable_lvds_link_output,
.enable_tmds_link_output = dce110_enable_tmds_link_output,
.enable_dp_link_output = dce110_enable_dp_link_output,
- .enable_analog_link_output = dce110_enable_analog_link_output,
.disable_link_output = dce110_disable_link_output,
.dac_load_detect = dce110_dac_load_detect,
- .prepare_ddc = dce110_prepare_ddc,
};
static const struct hwseq_private_funcs dce110_private_funcs = {
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_hwseq.c
index a2d28be480e8..17ff66d9a617 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_hwseq.c
@@ -60,9 +60,9 @@
#include "dc_state_priv.h"
#define DC_LOGGER \
- dc_logger
-#define DC_LOGGER_INIT(logger) \
- struct dal_logger *dc_logger = logger
+ dc_ctx->logger
+#define DC_LOGGER_INIT(ctx) \
+ struct dc_context *dc_ctx = ctx
#define CTX \
hws->ctx
@@ -1009,7 +1009,7 @@ static void power_on_plane_resources(
struct dce_hwseq *hws,
int plane_id)
{
- DC_LOGGER_INIT(hws->ctx->logger);
+ DC_LOGGER_INIT(hws->ctx);
if (hws->funcs.dpp_root_clock_control)
hws->funcs.dpp_root_clock_control(hws, plane_id, true);
@@ -1286,7 +1286,7 @@ static void dcn10_reset_back_end_for_pipe(
{
int i;
struct dc_link *link;
- DC_LOGGER_INIT(dc->ctx->logger);
+ DC_LOGGER_INIT(dc->ctx);
if (pipe_ctx->stream_res.stream_enc == NULL) {
pipe_ctx->stream = NULL;
return;
@@ -1422,12 +1422,10 @@ void dcn10_verify_allow_pstate_change_high(struct dc *dc)
return;
if (!hubbub->funcs->verify_allow_pstate_change_high(hubbub)) {
- int i = 0;
-
if (should_log_hw_state)
dcn10_log_hw_state(dc, NULL);
- TRACE_DC_PIPE_STATE(pipe_ctx, i, MAX_PIPES);
+ TRACE_DC_PIPE_STATE(pipe_ctx, MAX_PIPES);
BREAK_TO_DEBUGGER();
if (dcn10_hw_wa_force_recovery(dc)) {
/*check again*/
@@ -1490,7 +1488,7 @@ void dcn10_plane_atomic_power_down(struct dc *dc,
struct hubp *hubp)
{
struct dce_hwseq *hws = dc->hwseq;
- DC_LOGGER_INIT(dc->ctx->logger);
+ DC_LOGGER_INIT(dc->ctx);
if (REG(DC_IP_REQUEST_CNTL)) {
REG_SET(DC_IP_REQUEST_CNTL, 0,
@@ -1554,7 +1552,7 @@ void dcn10_plane_atomic_disable(struct dc *dc, struct pipe_ctx *pipe_ctx)
void dcn10_disable_plane(struct dc *dc, struct dc_state *state, struct pipe_ctx *pipe_ctx)
{
struct dce_hwseq *hws = dc->hwseq;
- DC_LOGGER_INIT(dc->ctx->logger);
+ DC_LOGGER_INIT(dc->ctx);
if (!pipe_ctx->plane_res.hubp || pipe_ctx->plane_res.hubp->power_gated)
return;
@@ -2268,8 +2266,6 @@ static bool wait_for_reset_trigger_to_occur(
{
bool rc = false;
- DC_LOGGER_INIT(dc_ctx->logger);
-
/* To avoid endless loop we wait at most
* frames_to_wait_on_triggered_reset frames for the reset to occur. */
const uint32_t frames_to_wait_on_triggered_reset = 10;
@@ -2384,7 +2380,6 @@ static uint8_t get_clock_divider(struct pipe_ctx *pipe,
static int dcn10_align_pixel_clocks(struct dc *dc, int group_size,
struct pipe_ctx *grouped_pipes[])
{
- struct dc_context *dc_ctx = dc->ctx;
int i, master = -1, embedded = -1;
struct dc_crtc_timing *hw_crtc_timing;
uint64_t phase[MAX_PIPES];
@@ -2397,7 +2392,7 @@ static int dcn10_align_pixel_clocks(struct dc *dc, int group_size,
uint32_t dp_ref_clk_100hz =
dc->res_pool->dp_clock_source->ctx->dc->clk_mgr->dprefclk_khz*10;
- DC_LOGGER_INIT(dc_ctx->logger);
+ DC_LOGGER_INIT(dc->ctx);
hw_crtc_timing = kzalloc_objs(*hw_crtc_timing, MAX_PIPES);
if (!hw_crtc_timing)
@@ -2477,12 +2472,11 @@ void dcn10_enable_vblanks_synchronization(
int group_size,
struct pipe_ctx *grouped_pipes[])
{
- struct dc_context *dc_ctx = dc->ctx;
struct output_pixel_processor *opp;
struct timing_generator *tg;
int i, width = 0, height = 0, master;
- DC_LOGGER_INIT(dc_ctx->logger);
+ DC_LOGGER_INIT(dc->ctx);
for (i = 1; i < group_size; i++) {
opp = grouped_pipes[i]->stream_res.opp;
@@ -2543,12 +2537,11 @@ void dcn10_enable_timing_synchronization(
int group_size,
struct pipe_ctx *grouped_pipes[])
{
- struct dc_context *dc_ctx = dc->ctx;
struct output_pixel_processor *opp;
struct timing_generator *tg;
int i, width = 0, height = 0;
- DC_LOGGER_INIT(dc_ctx->logger);
+ DC_LOGGER_INIT(dc->ctx);
DC_SYNC_INFO("Setting up OTG reset trigger\n");
@@ -2624,10 +2617,9 @@ void dcn10_enable_per_frame_crtc_position_reset(
int group_size,
struct pipe_ctx *grouped_pipes[])
{
- struct dc_context *dc_ctx = dc->ctx;
int i;
- DC_LOGGER_INIT(dc_ctx->logger);
+ DC_LOGGER_INIT(dc->ctx);
DC_SYNC_INFO("Setting up\n");
for (i = 0; i < group_size; i++)
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_hwseq.c
index db2f7cbb12ff..94f63fd54e3e 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn31/dcn31_hwseq.c
@@ -513,7 +513,6 @@ static void dcn31_reset_back_end_for_pipe(
{
struct dc_link *link;
- DC_LOGGER_INIT(dc->ctx->logger);
if (pipe_ctx->stream_res.stream_enc == NULL) {
pipe_ctx->stream = NULL;
return;
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c
index b5a4cefbd35f..b5f60f59382e 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c
@@ -60,15 +60,15 @@
#include "dcn20/dcn20_hwseq.h"
#include "dc_state_priv.h"
-#define DC_LOGGER_INIT(logger) \
- struct dal_logger *dc_logger = logger
+#define DC_LOGGER \
+ dc_ctx->logger
+#define DC_LOGGER_INIT(ctx) \
+ struct dc_context *dc_ctx = ctx
#define CTX \
hws->ctx
#define REG(reg)\
hws->regs->reg
-#define DC_LOGGER \
- dc_logger
#undef FN
@@ -331,7 +331,7 @@ static void update_dsc_on_stream(struct pipe_ctx *pipe_ctx, bool enable)
struct pipe_ctx *odm_pipe;
int opp_cnt = 1;
- DC_LOGGER_INIT(stream->ctx->logger);
+ DC_LOGGER_INIT(stream->ctx);
ASSERT(dsc);
for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe)
@@ -897,7 +897,7 @@ void dcn35_disable_plane(struct dc *dc, struct dc_state *state, struct pipe_ctx
bool is_phantom = dc_state_get_pipe_subvp_type(state, pipe_ctx) == SUBVP_PHANTOM;
struct timing_generator *tg = is_phantom ? pipe_ctx->stream_res.tg : NULL;
- DC_LOGGER_INIT(dc->ctx->logger);
+ DC_LOGGER_INIT(dc->ctx);
if (!pipe_ctx->plane_res.hubp || pipe_ctx->plane_res.hubp->power_gated)
return;
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.c
index 69cc70106bf0..357899116ecd 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn401/dcn401_hwseq.c
@@ -95,10 +95,6 @@ void dcn401_program_gamut_remap(struct pipe_ctx *pipe_ctx)
unsigned int mpcc_id = pipe_ctx->plane_res.mpcc_inst;
struct mpc *mpc = pipe_ctx->stream_res.opp->ctx->dc->res_pool->mpc;
- //For now assert if location is not pre-blend
- if (pipe_ctx->plane_state)
- ASSERT(pipe_ctx->plane_state->mcm_location == MPCC_MOVABLE_CM_LOCATION_BEFORE);
-
// program MPCC_MCM_FIRST_GAMUT_REMAP
memset(&mpc_adjust, 0, sizeof(mpc_adjust));
mpc_adjust.gamut_adjust_type = GRAPHICS_GAMUT_ADJUST_TYPE_BYPASS;
@@ -304,6 +300,7 @@ void dcn401_init_hw(struct dc *dc)
}
}
}
+
for (i = 0; i < res_pool->audio_count; i++) {
struct audio *audio = res_pool->audios[i];
@@ -372,293 +369,179 @@ void dcn401_init_hw(struct dc *dc)
}
}
-static void dcn401_get_mcm_lut_xable_from_pipe_ctx(struct dc *dc, struct pipe_ctx *pipe_ctx,
- enum MCM_LUT_XABLE *shaper_xable,
- enum MCM_LUT_XABLE *lut3d_xable,
- enum MCM_LUT_XABLE *lut1d_xable)
+void dcn401_trigger_3dlut_dma_load(struct dc *dc, struct pipe_ctx *pipe_ctx)
{
- enum dc_cm2_shaper_3dlut_setting shaper_3dlut_setting = DC_CM2_SHAPER_3DLUT_SETTING_BYPASS_ALL;
- bool lut1d_enable = false;
- struct mpc *mpc = dc->res_pool->mpc;
- int mpcc_id = pipe_ctx->plane_res.hubp->inst;
-
- if (!pipe_ctx->plane_state)
- return;
- shaper_3dlut_setting = pipe_ctx->plane_state->mcm_shaper_3dlut_setting;
- lut1d_enable = pipe_ctx->plane_state->mcm_lut1d_enable;
- mpc->funcs->set_movable_cm_location(mpc, MPCC_MOVABLE_CM_LOCATION_BEFORE, mpcc_id);
- pipe_ctx->plane_state->mcm_location = MPCC_MOVABLE_CM_LOCATION_BEFORE;
-
- *lut1d_xable = lut1d_enable ? MCM_LUT_ENABLE : MCM_LUT_DISABLE;
+ struct hubp *hubp = pipe_ctx->plane_res.hubp;
- switch (shaper_3dlut_setting) {
- case DC_CM2_SHAPER_3DLUT_SETTING_BYPASS_ALL:
- *lut3d_xable = *shaper_xable = MCM_LUT_DISABLE;
- break;
- case DC_CM2_SHAPER_3DLUT_SETTING_ENABLE_SHAPER:
- *lut3d_xable = MCM_LUT_DISABLE;
- *shaper_xable = MCM_LUT_ENABLE;
- break;
- case DC_CM2_SHAPER_3DLUT_SETTING_ENABLE_SHAPER_3DLUT:
- *lut3d_xable = *shaper_xable = MCM_LUT_ENABLE;
- break;
+ if (hubp->funcs->hubp_enable_3dlut_fl) {
+ hubp->funcs->hubp_enable_3dlut_fl(hubp, true);
}
}
-void dcn401_populate_mcm_luts(struct dc *dc,
- struct pipe_ctx *pipe_ctx,
- struct dc_cm2_func_luts mcm_luts,
- bool lut_bank_a)
+bool dcn401_set_mcm_luts(struct pipe_ctx *pipe_ctx,
+ const struct dc_plane_state *plane_state)
{
+ struct dc *dc = pipe_ctx->plane_res.hubp->ctx->dc;
struct dpp *dpp_base = pipe_ctx->plane_res.dpp;
struct hubp *hubp = pipe_ctx->plane_res.hubp;
+ const struct dc_plane_cm *cm = &plane_state->cm;
int mpcc_id = hubp->inst;
struct mpc *mpc = dc->res_pool->mpc;
union mcm_lut_params m_lut_params;
- enum dc_cm2_transfer_func_source lut3d_src = mcm_luts.lut3d_data.lut3d_src;
- enum hubp_3dlut_fl_format format = 0;
- enum hubp_3dlut_fl_mode mode;
- enum hubp_3dlut_fl_width width = 0;
- enum hubp_3dlut_fl_addressing_mode addr_mode;
- enum hubp_3dlut_fl_crossbar_bit_slice crossbar_bit_slice_y_g = 0;
- enum hubp_3dlut_fl_crossbar_bit_slice crossbar_bit_slice_cb_b = 0;
- enum hubp_3dlut_fl_crossbar_bit_slice crossbar_bit_slice_cr_r = 0;
- enum MCM_LUT_XABLE shaper_xable = MCM_LUT_DISABLE;
- enum MCM_LUT_XABLE lut3d_xable = MCM_LUT_DISABLE;
- enum MCM_LUT_XABLE lut1d_xable = MCM_LUT_DISABLE;
+ struct dc_3dlut_dma lut3d_dma;
+ bool lut_enable;
+ bool lut_bank_a;
bool rval;
+ bool result = true;
- dcn401_get_mcm_lut_xable_from_pipe_ctx(dc, pipe_ctx, &shaper_xable, &lut3d_xable, &lut1d_xable);
+ /* decide LUT bank based on current in use */
+ mpc->funcs->get_lut_mode(mpc, MCM_LUT_1DLUT, mpcc_id, &lut_enable, &lut_bank_a);
+ if (!lut_enable) {
+ mpc->funcs->get_lut_mode(mpc, MCM_LUT_SHAPER, mpcc_id, &lut_enable, &lut_bank_a);
+ }
+ if (!lut_enable) {
+ mpc->funcs->get_lut_mode(mpc, MCM_LUT_3DLUT, mpcc_id, &lut_enable, &lut_bank_a);
+ }
+
+ /* switch to the next bank */
+ if (lut_enable) {
+ lut_bank_a = !lut_bank_a;
+ }
+
+ /* MCM location fixed to pre-blend */
+ mpc->funcs->set_movable_cm_location(mpc, MPCC_MOVABLE_CM_LOCATION_BEFORE, mpcc_id);
/* 1D LUT */
- if (mcm_luts.lut1d_func) {
- memset(&m_lut_params, 0, sizeof(m_lut_params));
- if (mcm_luts.lut1d_func->type == TF_TYPE_HWPWL)
- m_lut_params.pwl = &mcm_luts.lut1d_func->pwl;
- else if (mcm_luts.lut1d_func->type == TF_TYPE_DISTRIBUTED_POINTS) {
- rval = cm3_helper_translate_curve_to_hw_format(mpc->ctx,
- mcm_luts.lut1d_func,
- &dpp_base->regamma_params, false);
+ lut_enable = cm->flags.bits.blend_enable;
+ memset(&m_lut_params, 0, sizeof(m_lut_params));
+ if (lut_enable) {
+ if (cm->blend_func.type == TF_TYPE_HWPWL)
+ m_lut_params.pwl = &cm->blend_func.pwl;
+ else if (cm->blend_func.type == TF_TYPE_DISTRIBUTED_POINTS) {
+ rval = cm3_helper_translate_curve_to_hw_format(plane_state->ctx,
+ &cm->blend_func,
+ &dpp_base->regamma_params,
+ false);
m_lut_params.pwl = rval ? &dpp_base->regamma_params : NULL;
}
- if (m_lut_params.pwl) {
- if (mpc->funcs->populate_lut)
- mpc->funcs->populate_lut(mpc, MCM_LUT_1DLUT, m_lut_params, lut_bank_a, mpcc_id);
+
+ if (!m_lut_params.pwl) {
+ lut_enable = false;
}
- if (mpc->funcs->program_lut_mode)
- mpc->funcs->program_lut_mode(mpc, MCM_LUT_1DLUT, lut1d_xable && m_lut_params.pwl, lut_bank_a, mpcc_id);
+ } else {
+ lut_enable = false;
}
+ if (mpc->funcs->program_lut_mode)
+ mpc->funcs->program_lut_mode(mpc, MCM_LUT_1DLUT, lut_enable, lut_bank_a, CM_LUT_SIZE_NONE, mpcc_id);
+ if (lut_enable && mpc->funcs->populate_lut)
+ mpc->funcs->populate_lut(mpc, MCM_LUT_1DLUT, &m_lut_params, lut_bank_a, mpcc_id);
+
/* Shaper */
- if (mcm_luts.shaper && mcm_luts.lut3d_data.mpc_3dlut_enable) {
+ lut_enable = cm->flags.bits.shaper_enable;
+ if (lut_enable) {
memset(&m_lut_params, 0, sizeof(m_lut_params));
- if (mcm_luts.shaper->type == TF_TYPE_HWPWL)
- m_lut_params.pwl = &mcm_luts.shaper->pwl;
- else if (mcm_luts.shaper->type == TF_TYPE_DISTRIBUTED_POINTS) {
+ if (cm->shaper_func.type == TF_TYPE_HWPWL)
+ m_lut_params.pwl = &cm->shaper_func.pwl;
+ else if (cm->shaper_func.type == TF_TYPE_DISTRIBUTED_POINTS) {
ASSERT(false);
- rval = cm3_helper_translate_curve_to_hw_format(mpc->ctx,
- mcm_luts.shaper,
- &dpp_base->regamma_params, true);
- m_lut_params.pwl = rval ? &dpp_base->regamma_params : NULL;
+ rval = cm3_helper_translate_curve_to_hw_format(plane_state->ctx,
+ &cm->shaper_func,
+ &dpp_base->shaper_params,
+ true);
+ m_lut_params.pwl = rval ? &dpp_base->shaper_params : NULL;
}
- if (m_lut_params.pwl) {
- if (mpc->funcs->mcm.populate_lut)
- mpc->funcs->mcm.populate_lut(mpc, m_lut_params, lut_bank_a, mpcc_id);
- if (mpc->funcs->program_lut_mode)
- mpc->funcs->program_lut_mode(mpc, MCM_LUT_SHAPER, MCM_LUT_ENABLE, lut_bank_a, mpcc_id);
+ if (!m_lut_params.pwl) {
+ lut_enable = false;
}
+ } else {
+ lut_enable = false;
}
- /* 3DLUT */
- switch (lut3d_src) {
- case DC_CM2_TRANSFER_FUNC_SOURCE_SYSMEM:
- memset(&m_lut_params, 0, sizeof(m_lut_params));
- if (hubp->funcs->hubp_enable_3dlut_fl)
- hubp->funcs->hubp_enable_3dlut_fl(hubp, false);
+ if (mpc->funcs->program_lut_mode)
+ mpc->funcs->program_lut_mode(mpc, MCM_LUT_SHAPER, lut_enable, lut_bank_a, CM_LUT_SIZE_NONE, mpcc_id);
+ if (lut_enable && mpc->funcs->populate_lut)
+ mpc->funcs->populate_lut(mpc, MCM_LUT_SHAPER, &m_lut_params, lut_bank_a, mpcc_id);
- if (mcm_luts.lut3d_data.lut3d_func && mcm_luts.lut3d_data.lut3d_func->state.bits.initialized) {
- m_lut_params.lut3d = &mcm_luts.lut3d_data.lut3d_func->lut_3d;
- if (mpc->funcs->populate_lut)
- mpc->funcs->populate_lut(mpc, MCM_LUT_3DLUT, m_lut_params, lut_bank_a, mpcc_id);
- if (mpc->funcs->program_lut_mode)
- mpc->funcs->program_lut_mode(mpc, MCM_LUT_3DLUT, lut3d_xable, lut_bank_a,
- mpcc_id);
- }
- break;
- case DC_CM2_TRANSFER_FUNC_SOURCE_VIDMEM:
- switch (mcm_luts.lut3d_data.gpu_mem_params.size) {
- case DC_CM2_GPU_MEM_SIZE_171717:
- width = hubp_3dlut_fl_width_17;
- break;
- case DC_CM2_GPU_MEM_SIZE_TRANSFORMED:
- width = hubp_3dlut_fl_width_transformed;
- break;
- default:
- //TODO: handle default case
- break;
- }
-
- //check for support
- if (mpc->funcs->mcm.is_config_supported &&
- !mpc->funcs->mcm.is_config_supported(width))
- break;
+ /* NOTE: Toggling from DMA->Host is not supported atomically as hardware
+ * blocks writes until 3DLUT FL mode is cleared from HUBP on VUpdate.
+ * Expectation is either option is used consistently.
+ */
- if (mpc->funcs->program_lut_read_write_control)
- mpc->funcs->program_lut_read_write_control(mpc, MCM_LUT_3DLUT, lut_bank_a, mpcc_id);
+ /* 3DLUT */
+ lut_enable = cm->flags.bits.lut3d_enable;
+ if (lut_enable && cm->flags.bits.lut3d_dma_enable) {
+ /* Fast (DMA) Load Mode */
+ /* MPC */
if (mpc->funcs->program_lut_mode)
- mpc->funcs->program_lut_mode(mpc, MCM_LUT_3DLUT, lut3d_xable, lut_bank_a, mpcc_id);
-
- if (hubp->funcs->hubp_program_3dlut_fl_addr)
- hubp->funcs->hubp_program_3dlut_fl_addr(hubp, mcm_luts.lut3d_data.gpu_mem_params.addr);
+ mpc->funcs->program_lut_mode(mpc, MCM_LUT_3DLUT, lut_enable, lut_bank_a, cm->lut3d_dma.size, mpcc_id);
- if (mpc->funcs->mcm.program_bit_depth)
- mpc->funcs->mcm.program_bit_depth(mpc, mcm_luts.lut3d_data.gpu_mem_params.bit_depth, mpcc_id);
+ /* only supports 12 bit */
+ if (mpc->funcs->program_lut_read_write_control)
+ mpc->funcs->program_lut_read_write_control(mpc, MCM_LUT_3DLUT, lut_bank_a, 12, mpcc_id);
- switch (mcm_luts.lut3d_data.gpu_mem_params.layout) {
- case DC_CM2_GPU_MEM_LAYOUT_3D_SWIZZLE_LINEAR_RGB:
- mode = hubp_3dlut_fl_mode_native_1;
- addr_mode = hubp_3dlut_fl_addressing_mode_sw_linear;
- break;
- case DC_CM2_GPU_MEM_LAYOUT_3D_SWIZZLE_LINEAR_BGR:
- mode = hubp_3dlut_fl_mode_native_2;
- addr_mode = hubp_3dlut_fl_addressing_mode_sw_linear;
- break;
- case DC_CM2_GPU_MEM_LAYOUT_1D_PACKED_LINEAR:
- mode = hubp_3dlut_fl_mode_transform;
- addr_mode = hubp_3dlut_fl_addressing_mode_simple_linear;
- break;
- default:
- mode = hubp_3dlut_fl_mode_disable;
- addr_mode = hubp_3dlut_fl_addressing_mode_sw_linear;
- break;
- }
- if (hubp->funcs->hubp_program_3dlut_fl_mode)
- hubp->funcs->hubp_program_3dlut_fl_mode(hubp, mode);
+ if (mpc->funcs->update_3dlut_fast_load_select)
+ mpc->funcs->update_3dlut_fast_load_select(mpc, mpcc_id, hubp->inst);
- if (hubp->funcs->hubp_program_3dlut_fl_addressing_mode)
- hubp->funcs->hubp_program_3dlut_fl_addressing_mode(hubp, addr_mode);
-
- switch (mcm_luts.lut3d_data.gpu_mem_params.format_params.format) {
- case DC_CM2_GPU_MEM_FORMAT_16161616_UNORM_12MSB:
- format = hubp_3dlut_fl_format_unorm_12msb_bitslice;
- break;
- case DC_CM2_GPU_MEM_FORMAT_16161616_UNORM_12LSB:
- format = hubp_3dlut_fl_format_unorm_12lsb_bitslice;
- break;
- case DC_CM2_GPU_MEM_FORMAT_16161616_FLOAT_FP1_5_10:
- format = hubp_3dlut_fl_format_float_fp1_5_10;
- break;
- }
- if (hubp->funcs->hubp_program_3dlut_fl_format)
- hubp->funcs->hubp_program_3dlut_fl_format(hubp, format);
- if (hubp->funcs->hubp_update_3dlut_fl_bias_scale &&
- mpc->funcs->mcm.program_bias_scale) {
- mpc->funcs->mcm.program_bias_scale(mpc,
- mcm_luts.lut3d_data.gpu_mem_params.format_params.float_params.bias,
- mcm_luts.lut3d_data.gpu_mem_params.format_params.float_params.scale,
- mpcc_id);
- hubp->funcs->hubp_update_3dlut_fl_bias_scale(hubp,
- mcm_luts.lut3d_data.gpu_mem_params.format_params.float_params.bias,
- mcm_luts.lut3d_data.gpu_mem_params.format_params.float_params.scale);
- }
-
- //navi 4x has a bug and r and blue are swapped and need to be worked around here in
- //TODO: need to make a method for get_xbar per asic OR do the workaround in program_crossbar for 4x
- switch (mcm_luts.lut3d_data.gpu_mem_params.component_order) {
- case DC_CM2_GPU_MEM_PIXEL_COMPONENT_ORDER_RGBA:
- default:
- crossbar_bit_slice_cr_r = hubp_3dlut_fl_crossbar_bit_slice_0_15;
- crossbar_bit_slice_y_g = hubp_3dlut_fl_crossbar_bit_slice_16_31;
- crossbar_bit_slice_cb_b = hubp_3dlut_fl_crossbar_bit_slice_32_47;
- break;
- }
+ /* HUBP */
+ if (hubp->funcs->hubp_program_3dlut_fl_config)
+ hubp->funcs->hubp_program_3dlut_fl_config(hubp, &cm->lut3d_dma);
if (hubp->funcs->hubp_program_3dlut_fl_crossbar)
- hubp->funcs->hubp_program_3dlut_fl_crossbar(hubp,
- crossbar_bit_slice_cr_r,
- crossbar_bit_slice_y_g,
- crossbar_bit_slice_cb_b);
-
- if (mpc->funcs->mcm.program_lut_read_write_control)
- mpc->funcs->mcm.program_lut_read_write_control(mpc, MCM_LUT_3DLUT, lut_bank_a, true, mpcc_id);
+ hubp->funcs->hubp_program_3dlut_fl_crossbar(hubp, cm->lut3d_dma.format);
- if (mpc->funcs->mcm.program_3dlut_size)
- mpc->funcs->mcm.program_3dlut_size(mpc, width, mpcc_id);
-
- if (mpc->funcs->update_3dlut_fast_load_select)
- mpc->funcs->update_3dlut_fast_load_select(mpc, mpcc_id, hubp->inst);
+ if (hubp->funcs->hubp_program_3dlut_fl_addr)
+ hubp->funcs->hubp_program_3dlut_fl_addr(hubp, &cm->lut3d_dma.addr);
- if (hubp->funcs->hubp_enable_3dlut_fl)
+ if (hubp->funcs->hubp_enable_3dlut_fl) {
hubp->funcs->hubp_enable_3dlut_fl(hubp, true);
- else {
- if (mpc->funcs->program_lut_mode) {
- mpc->funcs->program_lut_mode(mpc, MCM_LUT_SHAPER, MCM_LUT_DISABLE, lut_bank_a, mpcc_id);
- mpc->funcs->program_lut_mode(mpc, MCM_LUT_3DLUT, MCM_LUT_DISABLE, lut_bank_a, mpcc_id);
- mpc->funcs->program_lut_mode(mpc, MCM_LUT_1DLUT, MCM_LUT_DISABLE, lut_bank_a, mpcc_id);
- }
+ } else {
+ /* GPU memory only supports fast load path */
+ BREAK_TO_DEBUGGER();
+ lut_enable = false;
+ result = false;
}
- break;
+ } else {
+ /* Legacy (Host) Load Mode */
+ memset(&m_lut_params, 0, sizeof(m_lut_params));
- }
-}
+ if (cm->flags.bits.lut3d_enable && cm->lut3d_func.state.bits.initialized) {
+ m_lut_params.lut3d = &cm->lut3d_func.lut_3d;
+ } else {
+ lut_enable = false;
+ }
-void dcn401_trigger_3dlut_dma_load(struct dc *dc, struct pipe_ctx *pipe_ctx)
-{
- struct hubp *hubp = pipe_ctx->plane_res.hubp;
+ /* MPC */
+ if (mpc->funcs->program_lut_mode)
+ mpc->funcs->program_lut_mode(mpc,
+ MCM_LUT_3DLUT,
+ lut_enable,
+ lut_bank_a,
+ cm->lut3d_func.lut_3d.use_tetrahedral_9 ? CM_LUT_SIZE_999 : CM_LUT_SIZE_171717,
+ mpcc_id);
+
+ if (lut_enable) {
+ if (mpc->funcs->program_lut_read_write_control)
+ mpc->funcs->program_lut_read_write_control(mpc,
+ MCM_LUT_3DLUT,
+ lut_bank_a,
+ cm->lut3d_func.lut_3d.use_12bits ? 12 : 10,
+ mpcc_id);
- if (hubp->funcs->hubp_enable_3dlut_fl) {
- hubp->funcs->hubp_enable_3dlut_fl(hubp, true);
- }
-}
+ if (mpc->funcs->update_3dlut_fast_load_select)
+ mpc->funcs->update_3dlut_fast_load_select(mpc, mpcc_id, 0xf);
-bool dcn401_set_mcm_luts(struct pipe_ctx *pipe_ctx,
- const struct dc_plane_state *plane_state)
-{
- struct dpp *dpp_base = pipe_ctx->plane_res.dpp;
- int mpcc_id = pipe_ctx->plane_res.hubp->inst;
- struct dc *dc = pipe_ctx->stream_res.opp->ctx->dc;
- struct mpc *mpc = dc->res_pool->mpc;
- bool result;
- const struct pwl_params *lut_params = NULL;
- bool rval;
+ if (mpc->funcs->populate_lut)
+ mpc->funcs->populate_lut(mpc, MCM_LUT_3DLUT, &m_lut_params, lut_bank_a, mpcc_id);
+ }
- if (plane_state->mcm_luts.lut3d_data.lut3d_src == DC_CM2_TRANSFER_FUNC_SOURCE_VIDMEM) {
- dcn401_populate_mcm_luts(dc, pipe_ctx, plane_state->mcm_luts, plane_state->lut_bank_a);
- return true;
- }
+ /* HUBP */
+ memset(&lut3d_dma, 0, sizeof(lut3d_dma));
+ if (hubp->funcs->hubp_program_3dlut_fl_config)
+ hubp->funcs->hubp_program_3dlut_fl_config(hubp, &lut3d_dma);
- mpc->funcs->set_movable_cm_location(mpc, MPCC_MOVABLE_CM_LOCATION_BEFORE, mpcc_id);
- pipe_ctx->plane_state->mcm_location = MPCC_MOVABLE_CM_LOCATION_BEFORE;
- // 1D LUT
- if (plane_state->blend_tf.type == TF_TYPE_HWPWL)
- lut_params = &plane_state->blend_tf.pwl;
- else if (plane_state->blend_tf.type == TF_TYPE_DISTRIBUTED_POINTS) {
- rval = cm3_helper_translate_curve_to_hw_format(plane_state->ctx,
- &plane_state->blend_tf,
- &dpp_base->regamma_params, false);
- lut_params = rval ? &dpp_base->regamma_params : NULL;
- }
- result = mpc->funcs->program_1dlut(mpc, lut_params, mpcc_id);
- lut_params = NULL;
-
- // Shaper
- if (plane_state->in_shaper_func.type == TF_TYPE_HWPWL)
- lut_params = &plane_state->in_shaper_func.pwl;
- else if (plane_state->in_shaper_func.type == TF_TYPE_DISTRIBUTED_POINTS) {
- // TODO: dpp_base replace
- rval = cm3_helper_translate_curve_to_hw_format(plane_state->ctx,
- &plane_state->in_shaper_func,
- &dpp_base->shaper_params, true);
- lut_params = rval ? &dpp_base->shaper_params : NULL;
- }
- result &= mpc->funcs->program_shaper(mpc, lut_params, mpcc_id);
-
- // 3D
- if (mpc->funcs->program_3dlut) {
- if (plane_state->lut3d_func.state.bits.initialized == 1)
- result &= mpc->funcs->program_3dlut(mpc, &plane_state->lut3d_func.lut_3d, mpcc_id);
- else
- result &= mpc->funcs->program_3dlut(mpc, NULL, mpcc_id);
+ if (hubp->funcs->hubp_enable_3dlut_fl)
+ hubp->funcs->hubp_enable_3dlut_fl(hubp, false);
}
return result;
@@ -982,6 +865,8 @@ void dcn401_enable_stream(struct pipe_ctx *pipe_ctx)
}
}
+ link_hwss->setup_stream_attribute(pipe_ctx);
+
if (dc->res_pool->dccg->funcs->set_pixel_rate_div) {
dc->res_pool->dccg->funcs->set_pixel_rate_div(
dc->res_pool->dccg,
@@ -1930,10 +1815,9 @@ void dcn401_perform_3dlut_wa_unlock(struct pipe_ctx *pipe_ctx)
for (odm_pipe = pipe_ctx; odm_pipe != NULL; odm_pipe = odm_pipe->next_odm_pipe) {
for (mpc_pipe = odm_pipe; mpc_pipe != NULL; mpc_pipe = mpc_pipe->bottom_pipe) {
- if (mpc_pipe->plane_state && mpc_pipe->plane_state->mcm_luts.lut3d_data.lut3d_src
- == DC_CM2_TRANSFER_FUNC_SOURCE_VIDMEM
- && mpc_pipe->plane_state->mcm_shaper_3dlut_setting
- == DC_CM2_SHAPER_3DLUT_SETTING_ENABLE_SHAPER_3DLUT) {
+ if (mpc_pipe->plane_state &&
+ mpc_pipe->plane_state->cm.flags.bits.lut3d_enable &&
+ mpc_pipe->plane_state->cm.flags.bits.lut3d_dma_enable) {
wa_pipes[wa_pipe_ct++] = mpc_pipe;
}
}
@@ -1982,7 +1866,6 @@ void dcn401_reset_back_end_for_pipe(
struct dc_link *link = pipe_ctx->stream->link;
const struct link_hwss *link_hwss = get_link_hwss(link, &pipe_ctx->link_res);
- DC_LOGGER_INIT(dc->ctx->logger);
if (pipe_ctx->stream_res.stream_enc == NULL) {
pipe_ctx->stream = NULL;
return;
@@ -2061,6 +1944,22 @@ void dcn401_reset_back_end_for_pipe(
DC_LOG_DEBUG("Reset back end for pipe %d, tg:%d\n",
pipe_ctx->pipe_idx, pipe_ctx->stream_res.tg->inst);
}
+static void dc_hwss_disable_otg_pwa(struct dc *dc)
+{
+ if (dc->debug.enable_otg_frame_sync_pwa) {
+ int i;
+
+ /*reset all the otg*/
+ for (i = dc->res_pool->timing_generator_count - 1; i >= 0 ; i--) {
+ struct timing_generator *tg = dc->res_pool->timing_generators[i];
+
+ if (tg->funcs->disable_otg_pwa) {
+ tg->funcs->disable_otg_pwa(tg);
+ DC_LOG_DC("otg frame sync pwa disabled on otg%d\n", tg->inst);
+ }
+ }
+ }
+}
void dcn401_reset_hw_ctx_wrap(
struct dc *dc,
@@ -2069,6 +1968,7 @@ void dcn401_reset_hw_ctx_wrap(
int i;
struct dce_hwseq *hws = dc->hwseq;
+ dc_hwss_disable_otg_pwa(dc);
/* Reset Back End*/
for (i = dc->res_pool->pipe_count - 1; i >= 0 ; i--) {
struct pipe_ctx *pipe_ctx_old =
@@ -2259,6 +2159,10 @@ void dcn401_program_pipe(
pipe_ctx->stream_res.test_pattern_params.height,
pipe_ctx->stream_res.test_pattern_params.offset);
}
+ if (pipe_ctx->plane_state
+ && pipe_ctx->plane_state->update_flags.bits.cm_hist_change
+ && hws->funcs.program_cm_hist)
+ hws->funcs.program_cm_hist(dc, pipe_ctx, pipe_ctx->plane_state);
}
/*
@@ -2410,6 +2314,13 @@ void dcn401_program_pipe_sequence(
pipe_ctx->stream_res.test_pattern_params.offset);
}
+ if (pipe_ctx->plane_state
+ && pipe_ctx->plane_state->update_flags.bits.cm_hist_change
+ && hws->funcs.program_cm_hist) {
+
+ hwss_add_dpp_program_cm_hist(seq_state, pipe_ctx->plane_res.dpp,
+ pipe_ctx->plane_state->cm_hist_control, pipe_ctx->plane_state->color_space);
+ }
}
void dcn401_program_front_end_for_ctx(
@@ -2422,8 +2333,6 @@ void dcn401_program_front_end_for_ctx(
struct dce_hwseq *hws = dc->hwseq;
struct pipe_ctx *pipe = NULL;
- DC_LOGGER_INIT(dc->ctx->logger);
-
if (resource_is_pipe_topology_changed(dc->current_state, context))
resource_log_pipe_topology_update(dc, context);
@@ -2587,8 +2496,6 @@ void dcn401_post_unlock_program_front_end(
struct dce_hwseq *hwseq = dc->hwseq;
int i;
- DC_LOGGER_INIT(dc->ctx->logger);
-
for (i = 0; i < dc->res_pool->pipe_count; i++)
if (resource_is_pipe_type(&dc->current_state->res_ctx.pipe_ctx[i], OPP_HEAD) &&
!resource_is_pipe_type(&context->res_ctx.pipe_ctx[i], OPP_HEAD))
@@ -2968,8 +2875,6 @@ void dcn401_plane_atomic_power_down(struct dc *dc,
struct dce_hwseq *hws = dc->hwseq;
uint32_t org_ip_request_cntl = 0;
- DC_LOGGER_INIT(dc->ctx->logger);
-
if (REG(DC_IP_REQUEST_CNTL)) {
REG_GET(DC_IP_REQUEST_CNTL, IP_REQUEST_EN, &org_ip_request_cntl);
if (org_ip_request_cntl == 0)
@@ -3061,8 +2966,6 @@ void dcn401_plane_atomic_power_down_sequence(struct dc *dc,
struct dce_hwseq *hws = dc->hwseq;
uint32_t org_ip_request_cntl = 0;
- DC_LOGGER_INIT(dc->ctx->logger);
-
/* Check and set DC_IP_REQUEST_CNTL if needed */
if (REG(DC_IP_REQUEST_CNTL)) {
REG_GET(DC_IP_REQUEST_CNTL, IP_REQUEST_EN, &org_ip_request_cntl);
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn42/dcn42_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn42/dcn42_hwseq.c
index f0e1ed0f2949..8e12dc1297c4 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn42/dcn42_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn42/dcn42_hwseq.c
@@ -261,7 +261,7 @@ void dcn42_init_hw(struct dc *dc)
dc->res_pool->hubbub->funcs->init_crb(dc->res_pool->hubbub);
if (dc->res_pool->hubbub->funcs->set_request_limit && dc->config.sdpif_request_limit_words_per_umc > 0)
- dc->res_pool->hubbub->funcs->set_request_limit(dc->res_pool->hubbub, dc->ctx->dc_bios->vram_info.num_chans, dc->config.sdpif_request_limit_words_per_umc);
+ dc->res_pool->hubbub->funcs->set_request_limit(dc->res_pool->hubbub, dc->clk_mgr->bw_params->num_channels, dc->config.sdpif_request_limit_words_per_umc);
// Get DMCUB capabilities
if (dc->ctx->dmub_srv) {
@@ -275,8 +275,7 @@ void dcn42_init_hw(struct dc *dc)
|| res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000 != current_dchub_ref_freq) {
/* update bounding box if FAMS2 disabled, or if dchub clk has changed */
if (dc->clk_mgr)
- dc->res_pool->funcs->update_bw_bounding_box(dc,
- dc->clk_mgr->bw_params);
+ dc->res_pool->funcs->update_bw_bounding_box(dc, dc->clk_mgr->bw_params);
}
}
if (dc->res_pool->pg_cntl) {
@@ -383,68 +382,6 @@ void dcn42_program_cm_hist(
plane_state->cm_hist_control, plane_state->color_space);
}
-static void dc_get_lut_xbar(
- enum dc_cm2_gpu_mem_pixel_component_order order,
- enum hubp_3dlut_fl_crossbar_bit_slice *cr_r,
- enum hubp_3dlut_fl_crossbar_bit_slice *y_g,
- enum hubp_3dlut_fl_crossbar_bit_slice *cb_b)
-{
- switch (order) {
- case DC_CM2_GPU_MEM_PIXEL_COMPONENT_ORDER_RGBA:
- *cr_r = hubp_3dlut_fl_crossbar_bit_slice_32_47;
- *y_g = hubp_3dlut_fl_crossbar_bit_slice_16_31;
- *cb_b = hubp_3dlut_fl_crossbar_bit_slice_0_15;
- break;
- case DC_CM2_GPU_MEM_PIXEL_COMPONENT_ORDER_BGRA:
- *cr_r = hubp_3dlut_fl_crossbar_bit_slice_0_15;
- *y_g = hubp_3dlut_fl_crossbar_bit_slice_16_31;
- *cb_b = hubp_3dlut_fl_crossbar_bit_slice_32_47;
- break;
- }
-}
-
-static void dc_get_lut_mode(
- enum dc_cm2_gpu_mem_layout layout,
- enum hubp_3dlut_fl_mode *mode,
- enum hubp_3dlut_fl_addressing_mode *addr_mode)
-{
- switch (layout) {
- case DC_CM2_GPU_MEM_LAYOUT_3D_SWIZZLE_LINEAR_RGB:
- *mode = hubp_3dlut_fl_mode_native_1;
- *addr_mode = hubp_3dlut_fl_addressing_mode_sw_linear;
- break;
- case DC_CM2_GPU_MEM_LAYOUT_3D_SWIZZLE_LINEAR_BGR:
- *mode = hubp_3dlut_fl_mode_native_2;
- *addr_mode = hubp_3dlut_fl_addressing_mode_sw_linear;
- break;
- case DC_CM2_GPU_MEM_LAYOUT_1D_PACKED_LINEAR:
- *mode = hubp_3dlut_fl_mode_transform;
- *addr_mode = hubp_3dlut_fl_addressing_mode_simple_linear;
- break;
- default:
- *mode = hubp_3dlut_fl_mode_disable;
- *addr_mode = hubp_3dlut_fl_addressing_mode_sw_linear;
- break;
- }
-}
-
-static void dc_get_lut_format(
- enum dc_cm2_gpu_mem_format dc_format,
- enum hubp_3dlut_fl_format *format)
-{
- switch (dc_format) {
- case DC_CM2_GPU_MEM_FORMAT_16161616_UNORM_12MSB:
- *format = hubp_3dlut_fl_format_unorm_12msb_bitslice;
- break;
- case DC_CM2_GPU_MEM_FORMAT_16161616_UNORM_12LSB:
- *format = hubp_3dlut_fl_format_unorm_12lsb_bitslice;
- break;
- case DC_CM2_GPU_MEM_FORMAT_16161616_FLOAT_FP1_5_10:
- *format = hubp_3dlut_fl_format_float_fp1_5_10;
- break;
- }
-}
-
static bool dc_is_rmcm_3dlut_supported(struct hubp *hubp, struct mpc *mpc)
{
if (mpc->funcs->rmcm.power_on_shaper_3dlut &&
@@ -455,119 +392,17 @@ static bool dc_is_rmcm_3dlut_supported(struct hubp *hubp, struct mpc *mpc)
return false;
}
-static bool is_rmcm_3dlut_fl_supported(struct dc *dc, enum dc_cm2_gpu_mem_size size)
-{
- if (!dc->caps.color.mpc.rmcm_3d_lut_caps.dma_3d_lut)
- return false;
- if (size == DC_CM2_GPU_MEM_SIZE_171717)
- return (dc->caps.color.mpc.rmcm_3d_lut_caps.lut_dim_caps.dim_17);
- else if (size == DC_CM2_GPU_MEM_SIZE_333333)
- return (dc->caps.color.mpc.rmcm_3d_lut_caps.lut_dim_caps.dim_33);
- return false;
-}
-
-static void dcn42_set_mcm_location_post_blend(struct dc *dc, struct pipe_ctx *pipe_ctx, bool bPostBlend)
-{
- struct mpc *mpc = dc->res_pool->mpc;
- int mpcc_id = pipe_ctx->plane_res.hubp->inst;
-
- if (!pipe_ctx->plane_state)
- return;
-
- mpc->funcs->set_movable_cm_location(mpc, MPCC_MOVABLE_CM_LOCATION_BEFORE, mpcc_id);
- pipe_ctx->plane_state->mcm_location = (bPostBlend) ?
- MPCC_MOVABLE_CM_LOCATION_AFTER :
- MPCC_MOVABLE_CM_LOCATION_BEFORE;
-}
-
-static void dcn42_get_mcm_lut_xable_from_pipe_ctx(struct dc *dc, struct pipe_ctx *pipe_ctx,
- enum MCM_LUT_XABLE *shaper_xable,
- enum MCM_LUT_XABLE *lut3d_xable,
- enum MCM_LUT_XABLE *lut1d_xable)
-{
- enum dc_cm2_shaper_3dlut_setting shaper_3dlut_setting = DC_CM2_SHAPER_3DLUT_SETTING_BYPASS_ALL;
- bool lut1d_enable = false;
- struct mpc *mpc = dc->res_pool->mpc;
- int mpcc_id = pipe_ctx->plane_res.hubp->inst;
-
- if (!pipe_ctx->plane_state)
- return;
- shaper_3dlut_setting = pipe_ctx->plane_state->mcm_shaper_3dlut_setting;
- lut1d_enable = pipe_ctx->plane_state->mcm_lut1d_enable;
- mpc->funcs->set_movable_cm_location(mpc, MPCC_MOVABLE_CM_LOCATION_BEFORE, mpcc_id);
- pipe_ctx->plane_state->mcm_location = MPCC_MOVABLE_CM_LOCATION_BEFORE;
-
- *lut1d_xable = lut1d_enable ? MCM_LUT_ENABLE : MCM_LUT_DISABLE;
-
- switch (shaper_3dlut_setting) {
- case DC_CM2_SHAPER_3DLUT_SETTING_BYPASS_ALL:
- *lut3d_xable = *shaper_xable = MCM_LUT_DISABLE;
- break;
- case DC_CM2_SHAPER_3DLUT_SETTING_ENABLE_SHAPER:
- *lut3d_xable = MCM_LUT_DISABLE;
- *shaper_xable = MCM_LUT_ENABLE;
- break;
- case DC_CM2_SHAPER_3DLUT_SETTING_ENABLE_SHAPER_3DLUT:
- *lut3d_xable = *shaper_xable = MCM_LUT_ENABLE;
- break;
- }
-}
-
-static void fl_get_lut_mode(
- enum dc_cm2_gpu_mem_layout layout,
- enum dc_cm2_gpu_mem_size size,
- enum hubp_3dlut_fl_mode *mode,
- enum hubp_3dlut_fl_addressing_mode *addr_mode,
- enum hubp_3dlut_fl_width *width)
-{
- *width = hubp_3dlut_fl_width_17;
-
- if (size == DC_CM2_GPU_MEM_SIZE_333333)
- *width = hubp_3dlut_fl_width_33;
-
- switch (layout) {
- case DC_CM2_GPU_MEM_LAYOUT_3D_SWIZZLE_LINEAR_RGB:
- *mode = hubp_3dlut_fl_mode_native_1;
- *addr_mode = hubp_3dlut_fl_addressing_mode_sw_linear;
- break;
- case DC_CM2_GPU_MEM_LAYOUT_3D_SWIZZLE_LINEAR_BGR:
- *mode = hubp_3dlut_fl_mode_native_2;
- *addr_mode = hubp_3dlut_fl_addressing_mode_sw_linear;
- break;
- case DC_CM2_GPU_MEM_LAYOUT_1D_PACKED_LINEAR:
- *mode = hubp_3dlut_fl_mode_transform;
- *addr_mode = hubp_3dlut_fl_addressing_mode_simple_linear;
- break;
- default:
- *mode = hubp_3dlut_fl_mode_disable;
- *addr_mode = hubp_3dlut_fl_addressing_mode_sw_linear;
- break;
- }
-}
-
bool dcn42_program_rmcm_luts(
struct hubp *hubp,
struct pipe_ctx *pipe_ctx,
- enum dc_cm2_transfer_func_source lut3d_src,
- struct dc_cm2_func_luts *mcm_luts,
+ const struct dc_plane_cm *cm,
struct mpc *mpc,
- bool lut_bank_a,
int mpcc_id)
{
struct dpp *dpp_base = pipe_ctx->plane_res.dpp;
union mcm_lut_params m_lut_params = {0};
- enum MCM_LUT_XABLE shaper_xable, lut3d_xable = MCM_LUT_DISABLE, lut1d_xable;
- enum hubp_3dlut_fl_mode mode;
- enum hubp_3dlut_fl_addressing_mode addr_mode;
- enum hubp_3dlut_fl_format format = hubp_3dlut_fl_format_unorm_12msb_bitslice;
- enum hubp_3dlut_fl_crossbar_bit_slice crossbar_bit_slice_y_g = hubp_3dlut_fl_crossbar_bit_slice_16_31;
- enum hubp_3dlut_fl_crossbar_bit_slice crossbar_bit_slice_cb_b = hubp_3dlut_fl_crossbar_bit_slice_0_15;
- enum hubp_3dlut_fl_crossbar_bit_slice crossbar_bit_slice_cr_r = hubp_3dlut_fl_crossbar_bit_slice_32_47;
- enum hubp_3dlut_fl_width width = hubp_3dlut_fl_width_17;
-
struct dc *dc = hubp->ctx->dc;
- struct hubp_fl_3dlut_config fl_config;
struct mpc_fl_3dlut_config mpc_fl_config;
struct dc_stream_state *stream = pipe_ctx->stream;
@@ -575,25 +410,23 @@ bool dcn42_program_rmcm_luts(
// true->false when it can be allocated at DI time
struct dc_rmcm_3dlut *rmcm_3dlut = dc_stream_get_3dlut_for_stream(dc, stream, false);
+ bool lut_bank_a = true; // TODO: read the currently selected LUT bank from HW
+
//check to see current pipe is part of a stream with allocated rmcm 3dlut
if (!rmcm_3dlut)
return false;
- rmcm_3dlut->protection_bits = mcm_luts->lut3d_data.rmcm_tmz;
-
- dcn42_get_mcm_lut_xable_from_pipe_ctx(dc, pipe_ctx, &shaper_xable, &lut3d_xable, &lut1d_xable);
-
/* Shaper */
- if (mcm_luts->shaper) {
+ if (cm->flags.bits.shaper_enable) {
memset(&m_lut_params, 0, sizeof(m_lut_params));
- if (mcm_luts->shaper->type == TF_TYPE_HWPWL) {
- m_lut_params.pwl = &mcm_luts->shaper->pwl;
- } else if (mcm_luts->shaper->type == TF_TYPE_DISTRIBUTED_POINTS) {
+ if (cm->shaper_func.type == TF_TYPE_HWPWL) {
+ m_lut_params.pwl = &cm->shaper_func.pwl;
+ } else if (cm->shaper_func.type == TF_TYPE_DISTRIBUTED_POINTS) {
ASSERT(false);
cm_helper_translate_curve_to_hw_format(
dc->ctx,
- mcm_luts->shaper,
+ &cm->shaper_func,
&dpp_base->shaper_params, true);
m_lut_params.pwl = &dpp_base->shaper_params;
}
@@ -609,58 +442,21 @@ bool dcn42_program_rmcm_luts(
}
/* 3DLUT */
- switch (lut3d_src) {
- case DC_CM2_TRANSFER_FUNC_SOURCE_SYSMEM:
+ if (!cm->flags.bits.lut3d_dma_enable) {
memset(&m_lut_params, 0, sizeof(m_lut_params));
// Don't know what to do in this case.
- //case DC_CM2_TRANSFER_FUNC_SOURCE_SYSMEM:
- break;
- case DC_CM2_TRANSFER_FUNC_SOURCE_VIDMEM:
- fl_get_lut_mode(mcm_luts->lut3d_data.gpu_mem_params.layout,
- mcm_luts->lut3d_data.gpu_mem_params.size,
- &mode,
- &addr_mode,
- &width);
-
- if (!dc_is_rmcm_3dlut_supported(hubp, mpc) ||
- !mpc->funcs->rmcm.is_config_supported(
- (width == hubp_3dlut_fl_width_17 ||
- width == hubp_3dlut_fl_width_transformed) ? 17 : 33))
+ } else {
+ if (!dc_is_rmcm_3dlut_supported(hubp, mpc))
return false;
- // setting native or transformed mode,
- dc_get_lut_mode(mcm_luts->lut3d_data.gpu_mem_params.layout, &mode, &addr_mode);
-
//seems to be only for the MCM
- dc_get_lut_format(mcm_luts->lut3d_data.gpu_mem_params.format_params.format, &format);
-
- dc_get_lut_xbar(
- mcm_luts->lut3d_data.gpu_mem_params.component_order,
- &crossbar_bit_slice_cr_r,
- &crossbar_bit_slice_y_g,
- &crossbar_bit_slice_cb_b);
-
- fl_config.mode = mode;
- fl_config.enabled = lut3d_xable != MCM_LUT_DISABLE;
- fl_config.address = mcm_luts->lut3d_data.gpu_mem_params.addr;
- fl_config.format = format;
- fl_config.crossbar_bit_slice_y_g = crossbar_bit_slice_y_g;
- fl_config.crossbar_bit_slice_cb_b = crossbar_bit_slice_cb_b;
- fl_config.crossbar_bit_slice_cr_r = crossbar_bit_slice_cr_r;
- fl_config.width = width;
- fl_config.protection_bits = rmcm_3dlut->protection_bits;
- fl_config.addr_mode = addr_mode;
- fl_config.layout = mcm_luts->lut3d_data.gpu_mem_params.layout;
- fl_config.bias = mcm_luts->lut3d_data.gpu_mem_params.format_params.float_params.bias;
- fl_config.scale = mcm_luts->lut3d_data.gpu_mem_params.format_params.float_params.scale;
-
- mpc_fl_config.enabled = fl_config.enabled;
- mpc_fl_config.width = width;
+ mpc_fl_config.enabled = cm->flags.bits.lut3d_enable;
+ mpc_fl_config.size = cm->lut3d_dma.size;
mpc_fl_config.select_lut_bank_a = lut_bank_a;
- mpc_fl_config.bit_depth = mcm_luts->lut3d_data.gpu_mem_params.bit_depth;
+ mpc_fl_config.bit_depth = 0;
mpc_fl_config.hubp_index = hubp->inst;
- mpc_fl_config.bias = mcm_luts->lut3d_data.gpu_mem_params.format_params.float_params.bias;
- mpc_fl_config.scale = mcm_luts->lut3d_data.gpu_mem_params.format_params.float_params.scale;
+ mpc_fl_config.bias = cm->lut3d_dma.bias;
+ mpc_fl_config.scale = cm->lut3d_dma.scale;
//1. power down the block
mpc->funcs->rmcm.power_on_shaper_3dlut(mpc, mpcc_id, false);
@@ -668,268 +464,44 @@ bool dcn42_program_rmcm_luts(
//2. program RMCM - 3dlut reg programming
mpc->funcs->rmcm.fl_3dlut_configure(mpc, &mpc_fl_config, mpcc_id);
- hubp->funcs->hubp_program_3dlut_fl_config(hubp, &fl_config);
+ /* HUBP */
+ if (hubp->funcs->hubp_program_3dlut_fl_config)
+ hubp->funcs->hubp_program_3dlut_fl_config(hubp, &cm->lut3d_dma);
+
+ if (hubp->funcs->hubp_program_3dlut_fl_addr)
+ hubp->funcs->hubp_program_3dlut_fl_addr(hubp, &cm->lut3d_dma.addr);
//3. power on the block
mpc->funcs->rmcm.power_on_shaper_3dlut(mpc, mpcc_id, true);
-
- break;
- default:
- return false;
}
return true;
}
-void dcn42_populate_mcm_luts(struct dc *dc,
- struct pipe_ctx *pipe_ctx,
- struct dc_cm2_func_luts mcm_luts,
- bool lut_bank_a)
+bool dcn42_set_mcm_luts(struct pipe_ctx *pipe_ctx,
+ const struct dc_plane_state *plane_state)
{
- struct dpp *dpp_base = pipe_ctx->plane_res.dpp;
+ struct dc *dc = pipe_ctx->plane_res.hubp->ctx->dc;
struct hubp *hubp = pipe_ctx->plane_res.hubp;
- int mpcc_id = hubp->inst;
+ const struct dc_plane_cm *cm = &plane_state->cm;
struct mpc *mpc = dc->res_pool->mpc;
- union mcm_lut_params m_lut_params;
- enum dc_cm2_transfer_func_source lut3d_src = mcm_luts.lut3d_data.lut3d_src;
- enum hubp_3dlut_fl_format format = 0;
- enum hubp_3dlut_fl_mode mode;
- enum hubp_3dlut_fl_width width = 0;
- enum hubp_3dlut_fl_addressing_mode addr_mode;
- enum hubp_3dlut_fl_crossbar_bit_slice crossbar_bit_slice_y_g = 0;
- enum hubp_3dlut_fl_crossbar_bit_slice crossbar_bit_slice_cb_b = 0;
- enum hubp_3dlut_fl_crossbar_bit_slice crossbar_bit_slice_cr_r = 0;
- enum MCM_LUT_XABLE shaper_xable = MCM_LUT_DISABLE;
- enum MCM_LUT_XABLE lut3d_xable = MCM_LUT_DISABLE;
- enum MCM_LUT_XABLE lut1d_xable = MCM_LUT_DISABLE;
- bool rval;
-
- dcn42_get_mcm_lut_xable_from_pipe_ctx(dc, pipe_ctx, &shaper_xable, &lut3d_xable, &lut1d_xable);
-
- //MCM - setting its location (Before/After) blender
- //set to post blend (true)
- dcn42_set_mcm_location_post_blend(
- dc,
- pipe_ctx,
- mcm_luts.lut3d_data.mpc_mcm_post_blend);
-
- //RMCM - 3dLUT+Shaper
- if (mcm_luts.lut3d_data.rmcm_3dlut_enable &&
- is_rmcm_3dlut_fl_supported(dc, mcm_luts.lut3d_data.gpu_mem_params.size)) {
+ int mpcc_id = hubp->inst;
+ bool result;
+
+ /* MCM */
+ result = dcn401_set_mcm_luts(pipe_ctx, plane_state);
+
+ /* RMCM */
+ if (cm->flags.bits.rmcm_enable && cm->flags.bits.lut3d_dma_enable) {
+ /* TODO - move RMCM to its own block */
dcn42_program_rmcm_luts(
hubp,
pipe_ctx,
- lut3d_src,
- &mcm_luts,
+ cm,
mpc,
- lut_bank_a,
mpcc_id);
}
- /* 1D LUT */
- if (mcm_luts.lut1d_func) {
- memset(&m_lut_params, 0, sizeof(m_lut_params));
- if (mcm_luts.lut1d_func->type == TF_TYPE_HWPWL)
- m_lut_params.pwl = &mcm_luts.lut1d_func->pwl;
- else if (mcm_luts.lut1d_func->type == TF_TYPE_DISTRIBUTED_POINTS) {
- rval = cm3_helper_translate_curve_to_hw_format(mpc->ctx,
- mcm_luts.lut1d_func,
- &dpp_base->regamma_params, false);
- m_lut_params.pwl = rval ? &dpp_base->regamma_params : NULL;
- }
- if (m_lut_params.pwl) {
- if (mpc->funcs->populate_lut)
- mpc->funcs->populate_lut(mpc, MCM_LUT_1DLUT, m_lut_params, lut_bank_a, mpcc_id);
- }
- if (mpc->funcs->program_lut_mode)
- mpc->funcs->program_lut_mode(mpc, MCM_LUT_1DLUT, lut1d_xable && m_lut_params.pwl, lut_bank_a, mpcc_id);
- }
-
- /* Shaper */
- if (mcm_luts.shaper && mcm_luts.lut3d_data.mpc_3dlut_enable) {
- memset(&m_lut_params, 0, sizeof(m_lut_params));
- if (mcm_luts.shaper->type == TF_TYPE_HWPWL)
- m_lut_params.pwl = &mcm_luts.shaper->pwl;
- else if (mcm_luts.shaper->type == TF_TYPE_DISTRIBUTED_POINTS) {
- ASSERT(false);
- rval = cm3_helper_translate_curve_to_hw_format(mpc->ctx,
- mcm_luts.shaper,
- &dpp_base->regamma_params, true);
- m_lut_params.pwl = rval ? &dpp_base->regamma_params : NULL;
- }
- if (m_lut_params.pwl) {
- if (mpc->funcs->mcm.populate_lut)
- mpc->funcs->mcm.populate_lut(mpc, m_lut_params, lut_bank_a, mpcc_id);
- if (mpc->funcs->program_lut_mode)
- mpc->funcs->program_lut_mode(mpc, MCM_LUT_SHAPER, MCM_LUT_ENABLE, lut_bank_a, mpcc_id);
- }
- }
-
- /* 3DLUT */
- switch (lut3d_src) {
- case DC_CM2_TRANSFER_FUNC_SOURCE_SYSMEM:
- memset(&m_lut_params, 0, sizeof(m_lut_params));
- if (hubp->funcs->hubp_enable_3dlut_fl)
- hubp->funcs->hubp_enable_3dlut_fl(hubp, false);
-
- if (mcm_luts.lut3d_data.lut3d_func && mcm_luts.lut3d_data.lut3d_func->state.bits.initialized) {
- m_lut_params.lut3d = &mcm_luts.lut3d_data.lut3d_func->lut_3d;
- if (mpc->funcs->populate_lut)
- mpc->funcs->populate_lut(mpc, MCM_LUT_3DLUT, m_lut_params, lut_bank_a, mpcc_id);
- if (mpc->funcs->program_lut_mode)
- mpc->funcs->program_lut_mode(mpc, MCM_LUT_3DLUT, lut3d_xable, lut_bank_a,
- mpcc_id);
- }
- break;
- case DC_CM2_TRANSFER_FUNC_SOURCE_VIDMEM:
- switch (mcm_luts.lut3d_data.gpu_mem_params.size) {
- case DC_CM2_GPU_MEM_SIZE_333333:
- width = hubp_3dlut_fl_width_33;
- break;
- case DC_CM2_GPU_MEM_SIZE_171717:
- width = hubp_3dlut_fl_width_17;
- break;
- case DC_CM2_GPU_MEM_SIZE_TRANSFORMED:
- width = hubp_3dlut_fl_width_transformed;
- break;
- default:
- //TODO: Handle default case
- break;
- }
-
- //check for support
- if (mpc->funcs->mcm.is_config_supported &&
- !mpc->funcs->mcm.is_config_supported(width))
- break;
-
- if (mpc->funcs->program_lut_read_write_control)
- mpc->funcs->program_lut_read_write_control(mpc, MCM_LUT_3DLUT, lut_bank_a, mpcc_id);
- if (mpc->funcs->program_lut_mode)
- mpc->funcs->program_lut_mode(mpc, MCM_LUT_3DLUT, lut3d_xable, lut_bank_a, mpcc_id);
-
- if (hubp->funcs->hubp_program_3dlut_fl_addr)
- hubp->funcs->hubp_program_3dlut_fl_addr(hubp, mcm_luts.lut3d_data.gpu_mem_params.addr);
-
- if (mpc->funcs->mcm.program_bit_depth)
- mpc->funcs->mcm.program_bit_depth(mpc, mcm_luts.lut3d_data.gpu_mem_params.bit_depth, mpcc_id);
-
- dc_get_lut_mode(mcm_luts.lut3d_data.gpu_mem_params.layout, &mode, &addr_mode);
- if (hubp->funcs->hubp_program_3dlut_fl_mode)
- hubp->funcs->hubp_program_3dlut_fl_mode(hubp, mode);
-
- if (hubp->funcs->hubp_program_3dlut_fl_addressing_mode)
- hubp->funcs->hubp_program_3dlut_fl_addressing_mode(hubp, addr_mode);
-
- switch (mcm_luts.lut3d_data.gpu_mem_params.format_params.format) {
- case DC_CM2_GPU_MEM_FORMAT_16161616_UNORM_12MSB:
- format = hubp_3dlut_fl_format_unorm_12msb_bitslice;
- break;
- case DC_CM2_GPU_MEM_FORMAT_16161616_UNORM_12LSB:
- format = hubp_3dlut_fl_format_unorm_12lsb_bitslice;
- break;
- case DC_CM2_GPU_MEM_FORMAT_16161616_FLOAT_FP1_5_10:
- format = hubp_3dlut_fl_format_float_fp1_5_10;
- break;
- }
- if (hubp->funcs->hubp_program_3dlut_fl_format)
- hubp->funcs->hubp_program_3dlut_fl_format(hubp, format);
- if (hubp->funcs->hubp_update_3dlut_fl_bias_scale &&
- mpc->funcs->mcm.program_bias_scale) {
- mpc->funcs->mcm.program_bias_scale(mpc,
- mcm_luts.lut3d_data.gpu_mem_params.format_params.float_params.bias,
- mcm_luts.lut3d_data.gpu_mem_params.format_params.float_params.scale,
- mpcc_id);
- hubp->funcs->hubp_update_3dlut_fl_bias_scale(hubp,
- mcm_luts.lut3d_data.gpu_mem_params.format_params.float_params.bias,
- mcm_luts.lut3d_data.gpu_mem_params.format_params.float_params.scale);
- }
-
- //navi 4x has a bug and r and blue are swapped and need to be worked around here in
- //TODO: need to make a method for get_xbar per asic OR do the workaround in program_crossbar for 4x
- dc_get_lut_xbar(
- mcm_luts.lut3d_data.gpu_mem_params.component_order,
- &crossbar_bit_slice_cr_r,
- &crossbar_bit_slice_y_g,
- &crossbar_bit_slice_cb_b);
-
- if (hubp->funcs->hubp_program_3dlut_fl_crossbar)
- hubp->funcs->hubp_program_3dlut_fl_crossbar(hubp,
- crossbar_bit_slice_cr_r,
- crossbar_bit_slice_y_g,
- crossbar_bit_slice_cb_b);
-
- if (mpc->funcs->mcm.program_lut_read_write_control)
- mpc->funcs->mcm.program_lut_read_write_control(mpc, MCM_LUT_3DLUT, lut_bank_a, true, mpcc_id);
-
- if (mpc->funcs->mcm.program_3dlut_size)
- mpc->funcs->mcm.program_3dlut_size(mpc, width, mpcc_id);
-
- if (mpc->funcs->update_3dlut_fast_load_select)
- mpc->funcs->update_3dlut_fast_load_select(mpc, mpcc_id, hubp->inst);
-
- if (hubp->funcs->hubp_enable_3dlut_fl)
- hubp->funcs->hubp_enable_3dlut_fl(hubp, true);
- else {
- if (mpc->funcs->program_lut_mode) {
- mpc->funcs->program_lut_mode(mpc, MCM_LUT_SHAPER, MCM_LUT_DISABLE, lut_bank_a, mpcc_id);
- mpc->funcs->program_lut_mode(mpc, MCM_LUT_3DLUT, MCM_LUT_DISABLE, lut_bank_a, mpcc_id);
- mpc->funcs->program_lut_mode(mpc, MCM_LUT_1DLUT, MCM_LUT_DISABLE, lut_bank_a, mpcc_id);
- }
- }
- break;
- }
-}
-
-bool dcn42_set_mcm_luts(struct pipe_ctx *pipe_ctx,
- const struct dc_plane_state *plane_state)
-{
- struct dpp *dpp_base = pipe_ctx->plane_res.dpp;
- int mpcc_id = pipe_ctx->plane_res.hubp->inst;
- struct dc *dc = pipe_ctx->stream_res.opp->ctx->dc;
- struct mpc *mpc = dc->res_pool->mpc;
- bool result;
- const struct pwl_params *lut_params = NULL;
- bool rval;
-
- if (plane_state->mcm_luts.lut3d_data.lut3d_src == DC_CM2_TRANSFER_FUNC_SOURCE_VIDMEM) {
- dcn42_populate_mcm_luts(dc, pipe_ctx, plane_state->mcm_luts, plane_state->lut_bank_a);
- return true;
- }
-
- mpc->funcs->set_movable_cm_location(mpc, MPCC_MOVABLE_CM_LOCATION_BEFORE, mpcc_id);
- pipe_ctx->plane_state->mcm_location = MPCC_MOVABLE_CM_LOCATION_BEFORE;
- // 1D LUT
- if (plane_state->blend_tf.type == TF_TYPE_HWPWL)
- lut_params = &plane_state->blend_tf.pwl;
- else if (plane_state->blend_tf.type == TF_TYPE_DISTRIBUTED_POINTS) {
- rval = cm3_helper_translate_curve_to_hw_format(plane_state->ctx,
- &plane_state->blend_tf,
- &dpp_base->regamma_params, false);
- lut_params = rval ? &dpp_base->regamma_params : NULL;
- }
- result = mpc->funcs->program_1dlut(mpc, lut_params, mpcc_id);
- lut_params = NULL;
-
- // Shaper
- if (plane_state->in_shaper_func.type == TF_TYPE_HWPWL)
- lut_params = &plane_state->in_shaper_func.pwl;
- else if (plane_state->in_shaper_func.type == TF_TYPE_DISTRIBUTED_POINTS) {
- // TODO: dpp_base replace
- rval = cm3_helper_translate_curve_to_hw_format(plane_state->ctx,
- &plane_state->in_shaper_func,
- &dpp_base->shaper_params, true);
- lut_params = rval ? &dpp_base->shaper_params : NULL;
- }
- result &= mpc->funcs->program_shaper(mpc, lut_params, mpcc_id);
-
- // 3D
- if (mpc->funcs->program_3dlut) {
- if (plane_state->lut3d_func.state.bits.initialized == 1)
- result &= mpc->funcs->program_3dlut(mpc, &plane_state->lut3d_func.lut_3d, mpcc_id);
- else
- result &= mpc->funcs->program_3dlut(mpc, NULL, mpcc_id);
- }
-
return result;
}
void dcn42_hardware_release(struct dc *dc)
@@ -1050,7 +622,7 @@ void dcn42_prepare_bandwidth(
dc->hwss.hw_block_power_up(dc, &pg_update_state);
}
- dcn20_prepare_bandwidth(dc, context);
+ dcn401_prepare_bandwidth(dc, context);
}
void dcn42_optimize_bandwidth(struct dc *dc, struct dc_state *context)
@@ -1470,3 +1042,50 @@ void dcn42_dmub_hw_control_lock_fast(union block_sequence_params *params)
dmub_hw_lock_mgr_inbox0_cmd(dc->ctx->dmub_srv, hw_lock_cmd);
}
}
+
+/* In headless boot cases, DIG may be turned
+ * on, which causes HW/SW discrepancies.
+ * To avoid this, power down hardware on boot
+ * if DIG is turned on.
+ */
+void dcn42_power_down_on_boot(struct dc *dc)
+{
+ struct dc_link *edp_links[MAX_NUM_EDP];
+ struct dc_link *edp_link = NULL;
+ int edp_num;
+ int i = 0;
+
+ dc_get_edp_links(dc, edp_links, &edp_num);
+ if (edp_num)
+ edp_link = edp_links[0];
+
+ if (edp_link && edp_link->link_enc->funcs->is_dig_enabled &&
+ edp_link->link_enc->funcs->is_dig_enabled(edp_link->link_enc) &&
+ dc->hwseq->funcs.edp_backlight_control &&
+ dc->hwseq->funcs.power_down &&
+ dc->hwss.edp_power_control) {
+ dc->hwseq->funcs.edp_backlight_control(edp_link, false);
+ dc->hwseq->funcs.power_down(dc);
+ dc->hwss.edp_power_control(edp_link, false);
+ } else {
+ for (i = 0; i < dc->link_count; i++) {
+ struct dc_link *link = dc->links[i];
+
+ if (link->link_enc && link->link_enc->funcs->is_dig_enabled &&
+ link->link_enc->funcs->is_dig_enabled(link->link_enc) &&
+ dc->hwseq->funcs.power_down) {
+ dc->hwseq->funcs.power_down(dc);
+ break;
+ }
+
+ }
+ }
+
+ /*
+ * Call update_clocks with empty context
+ * to send DISPLAY_OFF
+ * Otherwise DISPLAY_OFF may not be asserted
+ */
+ if (dc->clk_mgr->funcs->set_low_power_state)
+ dc->clk_mgr->funcs->set_low_power_state(dc->clk_mgr);
+}
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn42/dcn42_hwseq.h b/drivers/gpu/drm/amd/display/dc/hwss/dcn42/dcn42_hwseq.h
index 89ebb6520eaf..c4cfeed45b19 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn42/dcn42_hwseq.h
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn42/dcn42_hwseq.h
@@ -18,18 +18,11 @@ void dcn42_program_cm_hist(
bool dcn42_set_mcm_luts(struct pipe_ctx *pipe_ctx,
const struct dc_plane_state *plane_state);
-void dcn42_populate_mcm_luts(struct dc *dc,
- struct pipe_ctx *pipe_ctx,
- struct dc_cm2_func_luts mcm_luts,
- bool lut_bank_a);
-
bool dcn42_program_rmcm_luts(
struct hubp *hubp,
struct pipe_ctx *pipe_ctx,
- enum dc_cm2_transfer_func_source lut3d_src,
- struct dc_cm2_func_luts *mcm_luts,
+ const struct dc_plane_cm *cm,
struct mpc *mpc,
- bool lut_bank_a,
int mpcc_id);
void dcn42_hardware_release(struct dc *dc);
@@ -50,4 +43,5 @@ void dcn42_root_clock_control(struct dc *dc,
void dcn42_dmub_hw_control_lock(struct dc *dc, struct dc_state *context, bool lock);
void dcn42_dmub_hw_control_lock_fast(union block_sequence_params *params);
void dcn42_setup_stereo(struct pipe_ctx *pipe_ctx, struct dc *dc);
+void dcn42_power_down_on_boot(struct dc *dc);
#endif
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn42/dcn42_init.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn42/dcn42_init.c
index a8e2f59d5e50..b324a2195e8a 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn42/dcn42_init.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn42/dcn42_init.c
@@ -19,7 +19,7 @@ static const struct hw_sequencer_funcs dcn42_funcs = {
.program_gamut_remap = dcn401_program_gamut_remap,
.init_hw = dcn42_init_hw,
.apply_ctx_to_hw = dce110_apply_ctx_to_hw,
- .power_down_on_boot = dcn35_power_down_on_boot,
+ .power_down_on_boot = dcn42_power_down_on_boot,
.apply_ctx_for_surface = NULL,
.program_front_end_for_ctx = dcn401_program_front_end_for_ctx,
.clear_surface_dcc_and_tiling = dcn10_reset_surface_dcc_and_tiling,
@@ -64,6 +64,12 @@ static const struct hw_sequencer_funcs dcn42_funcs = {
.set_cursor_position = dcn401_set_cursor_position,
.set_cursor_attribute = dcn10_set_cursor_attribute,
.set_cursor_sdr_white_level = dcn10_set_cursor_sdr_white_level,
+ .abort_cursor_offload_update = dcn35_abort_cursor_offload_update,
+ .begin_cursor_offload_update = dcn35_begin_cursor_offload_update,
+ .commit_cursor_offload_update = dcn35_commit_cursor_offload_update,
+ .update_cursor_offload_pipe = dcn401_update_cursor_offload_pipe,
+ .notify_cursor_offload_drr_update = dcn35_notify_cursor_offload_drr_update,
+ .program_cursor_offload_now = dcn35_program_cursor_offload_now,
.setup_periodic_interrupt = dcn10_setup_periodic_interrupt,
.set_clock = dcn10_set_clock,
.get_clock = dcn10_get_clock,
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h b/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h
index 91eba1985bab..21224fd6b36d 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h
@@ -215,7 +215,7 @@ struct clk_state_registers_and_bypass {
uint32_t dcfclk_bypass;
uint32_t dprefclk_bypass;
uint32_t dispclk_bypass;
- uint32_t timer_threhold;
+ uint32_t timer_threshold;
};
struct rv1_clk_internal {
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr_internal.h b/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr_internal.h
index 9e53eacee3f8..c69ccfcebeb5 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr_internal.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr_internal.h
@@ -243,7 +243,23 @@ enum dentist_divider_range {
CLK_SR_DCN42(CLK8_CLK3_DS_CNTL), \
CLK_SR_DCN42(CLK8_CLK4_DS_CNTL)
-#define CLK_COMMON_MASK_SH_LIST_DCN42(mask_sh) 0
+#define CLK_COMMON_MASK_SH_LIST_DCN42(mask_sh) \
+ CLK_SF(CLK8_CLK_TICK_CNT_CONFIG_REG, TIMER_THRESHOLD, mask_sh), \
+ CLK_SF(CLK8_CLK0_BYPASS_CNTL, CLK0_BYPASS_SEL, mask_sh), \
+ CLK_SF(CLK8_CLK1_BYPASS_CNTL, CLK1_BYPASS_SEL, mask_sh), \
+ CLK_SF(CLK8_CLK2_BYPASS_CNTL, CLK2_BYPASS_SEL, mask_sh), \
+ CLK_SF(CLK8_CLK3_BYPASS_CNTL, CLK3_BYPASS_SEL, mask_sh), \
+ CLK_SF(CLK8_CLK4_BYPASS_CNTL, CLK4_BYPASS_SEL, mask_sh), \
+ CLK_SF(CLK8_CLK0_DS_CNTL, CLK0_DS_DIV_ID, mask_sh), \
+ CLK_SF(CLK8_CLK1_DS_CNTL, CLK1_DS_DIV_ID, mask_sh), \
+ CLK_SF(CLK8_CLK2_DS_CNTL, CLK2_DS_DIV_ID, mask_sh), \
+ CLK_SF(CLK8_CLK3_DS_CNTL, CLK3_DS_DIV_ID, mask_sh), \
+ CLK_SF(CLK8_CLK4_DS_CNTL, CLK4_DS_DIV_ID, mask_sh), \
+ CLK_SF(CLK8_CLK0_DS_CNTL, CLK0_ALLOW_DS, mask_sh), \
+ CLK_SF(CLK8_CLK1_DS_CNTL, CLK1_ALLOW_DS, mask_sh), \
+ CLK_SF(CLK8_CLK2_DS_CNTL, CLK2_ALLOW_DS, mask_sh), \
+ CLK_SF(CLK8_CLK3_DS_CNTL, CLK3_ALLOW_DS, mask_sh), \
+ CLK_SF(CLK8_CLK4_DS_CNTL, CLK4_ALLOW_DS, mask_sh), \
@@ -259,6 +275,42 @@ enum dentist_divider_range {
type FbMult_int; \
type FbMult_frac;
+#define CLK42_REG_LIST(clkip_num, type) \
+ type CLK ## clkip_num ## _CLK_TICK_CNT_CONFIG_REG; \
+ type CLK ## clkip_num ## _CLK0_CURRENT_CNT; \
+ type CLK ## clkip_num ## _CLK1_CURRENT_CNT; \
+ type CLK ## clkip_num ## _CLK2_CURRENT_CNT; \
+ type CLK ## clkip_num ## _CLK3_CURRENT_CNT; \
+ type CLK ## clkip_num ## _CLK4_CURRENT_CNT; \
+ type CLK ## clkip_num ## _CLK0_BYPASS_CNTL; \
+ type CLK ## clkip_num ## _CLK1_BYPASS_CNTL; \
+ type CLK ## clkip_num ## _CLK2_BYPASS_CNTL; \
+ type CLK ## clkip_num ## _CLK3_BYPASS_CNTL; \
+ type CLK ## clkip_num ## _CLK4_BYPASS_CNTL; \
+ type CLK ## clkip_num ## _CLK0_DS_CNTL; \
+ type CLK ## clkip_num ## _CLK1_DS_CNTL; \
+ type CLK ## clkip_num ## _CLK2_DS_CNTL; \
+ type CLK ## clkip_num ## _CLK3_DS_CNTL; \
+ type CLK ## clkip_num ## _CLK4_DS_CNTL;
+
+#define CLK42_REG_FIELD_LIST(type) \
+ type TIMER_THRESHOLD; \
+ type CLK0_BYPASS_SEL; \
+ type CLK1_BYPASS_SEL; \
+ type CLK2_BYPASS_SEL; \
+ type CLK3_BYPASS_SEL; \
+ type CLK4_BYPASS_SEL; \
+ type CLK0_DS_DIV_ID; \
+ type CLK1_DS_DIV_ID; \
+ type CLK2_DS_DIV_ID; \
+ type CLK3_DS_DIV_ID; \
+ type CLK4_DS_DIV_ID; \
+ type CLK0_ALLOW_DS; \
+ type CLK1_ALLOW_DS; \
+ type CLK2_ALLOW_DS; \
+ type CLK3_ALLOW_DS; \
+ type CLK4_ALLOW_DS;
+
/*
***************************************************************************************
****************** Clock Manager Private Structures ***********************************
@@ -322,32 +374,19 @@ struct clk_mgr_registers {
uint32_t CLK1_CLK5_ALLOW_DS;
uint32_t CLK5_spll_field_8;
uint32_t CLK6_spll_field_8;
- uint32_t CLK8_CLK0_CURRENT_CNT;
- uint32_t CLK8_CLK1_CURRENT_CNT;
- uint32_t CLK8_CLK2_CURRENT_CNT;
- uint32_t CLK8_CLK3_CURRENT_CNT;
- uint32_t CLK8_CLK4_CURRENT_CNT;
- uint32_t CLK8_CLK0_DS_CNTL;
- uint32_t CLK8_CLK1_DS_CNTL;
- uint32_t CLK8_CLK2_DS_CNTL;
- uint32_t CLK8_CLK3_DS_CNTL;
- uint32_t CLK8_CLK4_DS_CNTL;
- uint32_t CLK8_CLK0_BYPASS_CNTL;
- uint32_t CLK8_CLK1_BYPASS_CNTL;
- uint32_t CLK8_CLK2_BYPASS_CNTL;
- uint32_t CLK8_CLK3_BYPASS_CNTL;
- uint32_t CLK8_CLK4_BYPASS_CNTL;
- uint32_t CLK8_CLK_TICK_CNT_CONFIG_REG;
+ CLK42_REG_LIST(8, uint32_t)
};
struct clk_mgr_shift {
CLK_REG_FIELD_LIST(uint8_t)
CLK20_REG_FIELD_LIST(uint8_t)
+ CLK42_REG_FIELD_LIST(uint8_t)
};
struct clk_mgr_mask {
CLK_REG_FIELD_LIST(uint32_t)
CLK20_REG_FIELD_LIST(uint32_t)
+ CLK42_REG_FIELD_LIST(uint32_t)
};
enum clock_type {
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h b/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h
index a79019365af8..2a5a81d15950 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/hubp.h
@@ -89,7 +89,7 @@ enum hubp_3dlut_fl_addressing_mode {
enum hubp_3dlut_fl_width {
hubp_3dlut_fl_width_17 = 17,
hubp_3dlut_fl_width_33 = 33,
- hubp_3dlut_fl_width_transformed = 4916, //mpc default
+ hubp_3dlut_fl_width_17_transformed = 4916, //mpc default
};
enum hubp_3dlut_fl_crossbar_bit_slice {
@@ -99,22 +99,6 @@ enum hubp_3dlut_fl_crossbar_bit_slice {
hubp_3dlut_fl_crossbar_bit_slice_48_63 = 3
};
-struct hubp_fl_3dlut_config {
- bool enabled;
- enum hubp_3dlut_fl_width width;
- enum hubp_3dlut_fl_mode mode;
- enum hubp_3dlut_fl_format format;
- uint16_t bias;
- uint16_t scale;
- struct dc_plane_address address;
- enum hubp_3dlut_fl_addressing_mode addr_mode;
- enum dc_cm2_gpu_mem_layout layout;
- uint8_t protection_bits;
- enum hubp_3dlut_fl_crossbar_bit_slice crossbar_bit_slice_y_g;
- enum hubp_3dlut_fl_crossbar_bit_slice crossbar_bit_slice_cb_b;
- enum hubp_3dlut_fl_crossbar_bit_slice crossbar_bit_slice_cr_r;
-};
-
struct hubp {
const struct hubp_funcs *funcs;
struct dc_context *ctx;
@@ -243,7 +227,6 @@ struct hubp_funcs {
void (*hubp_disable_control)(struct hubp *hubp, bool disable_hubp);
unsigned int (*hubp_get_underflow_status)(struct hubp *hubp);
void (*hubp_init)(struct hubp *hubp);
-
void (*dmdata_set_attributes)(
struct hubp *hubp,
const struct dc_dmdata_attributes *attr);
@@ -290,24 +273,15 @@ struct hubp_funcs {
void (*hubp_wait_pipe_read_start)(struct hubp *hubp);
void (*hubp_program_mcache_id_and_split_coordinate)(struct hubp *hubp, struct dml2_hubp_pipe_mcache_regs *mcache_regs);
- void (*hubp_update_3dlut_fl_bias_scale)(struct hubp *hubp, uint16_t bias, uint16_t scale);
- void (*hubp_program_3dlut_fl_mode)(struct hubp *hubp,
- enum hubp_3dlut_fl_mode mode);
- void (*hubp_program_3dlut_fl_format)(struct hubp *hubp,
- enum hubp_3dlut_fl_format format);
void (*hubp_program_3dlut_fl_addr)(struct hubp *hubp,
- const struct dc_plane_address address);
+ const struct dc_plane_address *address);
+ void (*hubp_program_3dlut_fl_config)(struct hubp *hubp,
+ const struct dc_3dlut_dma *config);
void (*hubp_program_3dlut_fl_dlg_param)(struct hubp *hubp, int refcyc_per_3dlut_group);
void (*hubp_enable_3dlut_fl)(struct hubp *hubp, bool enable);
- void (*hubp_program_3dlut_fl_addressing_mode)(struct hubp *hubp, enum hubp_3dlut_fl_addressing_mode addr_mode);
- void (*hubp_program_3dlut_fl_width)(struct hubp *hubp, enum hubp_3dlut_fl_width width);
- void (*hubp_program_3dlut_fl_tmz_protected)(struct hubp *hubp, uint8_t protection_bits);
void (*hubp_program_3dlut_fl_crossbar)(struct hubp *hubp,
- enum hubp_3dlut_fl_crossbar_bit_slice bit_slice_y_g,
- enum hubp_3dlut_fl_crossbar_bit_slice bit_slice_cb_b,
- enum hubp_3dlut_fl_crossbar_bit_slice bit_slice_cr_r);
+ enum dc_cm_lut_pixel_format format);
int (*hubp_get_3dlut_fl_done)(struct hubp *hubp);
- void (*hubp_program_3dlut_fl_config)(struct hubp *hubp, struct hubp_fl_3dlut_config *cfg);
void (*hubp_clear_tiling)(struct hubp *hubp);
uint32_t (*hubp_get_current_read_line)(struct hubp *hubp);
uint32_t (*hubp_get_det_config_error)(struct hubp *hubp);
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h b/drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h
index a61d12ec61bc..b152f6879495 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h
@@ -152,6 +152,13 @@ struct dc_rgb {
uint32_t blue;
};
+struct tetrahedral_33x33x33 {
+ struct dc_rgb lut0[8985];
+ struct dc_rgb lut1[8984];
+ struct dc_rgb lut2[8984];
+ struct dc_rgb lut3[8984];
+};
+
struct tetrahedral_17x17x17 {
struct dc_rgb lut0[1229];
struct dc_rgb lut1[1228];
@@ -165,14 +172,23 @@ struct tetrahedral_9x9x9 {
struct dc_rgb lut3[182];
};
+enum lut_dimension {
+ LUT_DIM_INVALID = 0,
+ LUT_DIM_9 = 9,
+ LUT_DIM_17 = 17,
+ LUT_DIM_33 = 33,
+};
+
struct tetrahedral_params {
union {
+// TODO: Uncomment tetrahedral_33 below once a caller uses the 33x33x33 LUT.
+// struct tetrahedral_33x33x33 tetrahedral_33;
struct tetrahedral_17x17x17 tetrahedral_17;
struct tetrahedral_9x9x9 tetrahedral_9;
};
bool use_tetrahedral_9;
bool use_12bits;
-
+ enum lut_dimension lut_dim;
};
/* arr_curve_points - regamma regions/segments specification
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/mpc.h b/drivers/gpu/drm/amd/display/dc/inc/hw/mpc.h
index 0db607f2a410..f5617674bea8 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/mpc.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/mpc.h
@@ -54,6 +54,7 @@
#include "dc_hw_types.h"
#include "hw_shared.h"
#include "transform.h"
+#include "dc_types.h"
#define MAX_MPCC 6
#define MAX_OPP 6
@@ -66,7 +67,6 @@ enum mpc_output_csc_mode {
MPC_OUTPUT_CSC_COEF_B
};
-
enum mpcc_blend_mode {
MPCC_BLEND_MODE_BYPASS,
MPCC_BLEND_MODE_TOP_LAYER_PASSTHROUGH,
@@ -102,13 +102,6 @@ enum mpcc_movable_cm_location {
MPCC_MOVABLE_CM_LOCATION_AFTER,
};
-enum MCM_LUT_XABLE {
- MCM_LUT_DISABLE,
- MCM_LUT_DISABLED = MCM_LUT_DISABLE,
- MCM_LUT_ENABLE,
- MCM_LUT_ENABLED = MCM_LUT_ENABLE,
-};
-
enum MCM_LUT_ID {
MCM_LUT_3DLUT,
MCM_LUT_1DLUT,
@@ -117,7 +110,7 @@ enum MCM_LUT_ID {
struct mpc_fl_3dlut_config {
bool enabled;
- uint16_t width;
+ enum dc_cm_lut_size size;
bool select_lut_bank_a;
uint16_t bit_depth;
int hubp_index;
@@ -1042,22 +1035,22 @@ struct mpc_funcs {
void (*update_3dlut_fast_load_select)(struct mpc *mpc, int mpcc_id, int hubp_idx);
-/**
- * @get_3dlut_fast_load_status:
- *
- * Get 3D LUT fast load status and reference them with done, soft_underflow and hard_underflow pointers.
- *
- * Parameters:
- * - [in/out] mpc - MPC context.
- * - [in] mpcc_id
- * - [in/out] done
- * - [in/out] soft_underflow
- * - [in/out] hard_underflow
- *
- * Return:
- *
- * void
- */
+ /**
+ * @get_3dlut_fast_load_status:
+ *
+	 * Get 3D LUT fast load status and return it through the done, soft_underflow and hard_underflow pointers.
+ *
+ * Parameters:
+ * - [in/out] mpc - MPC context.
+ * - [in] mpcc_id
+ * - [in/out] done
+ * - [in/out] soft_underflow
+ * - [in/out] hard_underflow
+ *
+ * Return:
+ *
+ * void
+ */
void (*get_3dlut_fast_load_status)(struct mpc *mpc, int mpcc_id, uint32_t *done, uint32_t *soft_underflow, uint32_t *hard_underflow);
/**
@@ -1076,8 +1069,11 @@ struct mpc_funcs {
*
* void
*/
- void (*populate_lut)(struct mpc *mpc, const enum MCM_LUT_ID id, const union mcm_lut_params params,
- bool lut_bank_a, int mpcc_id);
+ void (*populate_lut)(struct mpc *mpc,
+ const enum MCM_LUT_ID id,
+ const union mcm_lut_params *params,
+ const bool lut_bank_a,
+ const int mpcc_id);
/**
* @program_lut_read_write_control:
@@ -1088,13 +1084,18 @@ struct mpc_funcs {
* - [in/out] mpc - MPC context.
* - [in] id
* - [in] lut_bank_a
+ * - [in] bit_depth
* - [in] mpcc_id
*
* Return:
*
* void
*/
- void (*program_lut_read_write_control)(struct mpc *mpc, const enum MCM_LUT_ID id, bool lut_bank_a, int mpcc_id);
+ void (*program_lut_read_write_control)(struct mpc *mpc,
+ const enum MCM_LUT_ID id,
+ const bool lut_bank_a,
+ const unsigned int bit_depth,
+ const int mpcc_id);
/**
* @program_lut_mode:
@@ -1104,33 +1105,44 @@ struct mpc_funcs {
* Parameters:
* - [in/out] mpc - MPC context.
* - [in] id
- * - [in] xable
+ * - [in] enable
* - [in] lut_bank_a
+ * - [in] size
* - [in] mpcc_id
*
* Return:
*
* void
*/
- void (*program_lut_mode)(struct mpc *mpc, const enum MCM_LUT_ID id, const enum MCM_LUT_XABLE xable,
- bool lut_bank_a, int mpcc_id);
+ void (*program_lut_mode)(struct mpc *mpc,
+ const enum MCM_LUT_ID id,
+ const bool enable,
+ const bool lut_bank_a,
+ const enum dc_cm_lut_size size,
+ const int mpcc_id);
- /**
- * @mcm:
- *
- * MPC MCM new HW sequential programming functions
- */
- struct {
- void (*program_3dlut_size)(struct mpc *mpc, uint32_t width, int mpcc_id);
- void (*program_bias_scale)(struct mpc *mpc, uint16_t bias, uint16_t scale, int mpcc_id);
- void (*program_bit_depth)(struct mpc *mpc, uint16_t bit_depth, int mpcc_id);
- bool (*is_config_supported)(uint32_t width);
- void (*program_lut_read_write_control)(struct mpc *mpc, const enum MCM_LUT_ID id,
- bool lut_bank_a, bool enabled, int mpcc_id);
- void (*populate_lut)(struct mpc *mpc, const union mcm_lut_params params,
- bool lut_bank_a, int mpcc_id);
- } mcm;
+ /**
+ * @get_lut_mode:
+ *
+	 * Obtains enablement and RAM bank status.
+ *
+ * Parameters:
+ * - [in/out] mpc - MPC context.
+ * - [in] id
+ * - [in] mpcc_id
+ * - [out] enable
+ * - [out] lut_bank_a
+ *
+ * Return:
+ *
+ * void
+ */
+ void (*get_lut_mode)(struct mpc *mpc,
+ const enum MCM_LUT_ID id,
+ const int mpcc_id,
+ bool *enable,
+ bool *lut_bank_a);
/**
* @rmcm:
@@ -1143,9 +1155,11 @@ struct mpc_funcs {
void (*update_3dlut_fast_load_select)(struct mpc *mpc, int mpcc_id, int hubp_idx);
void (*program_lut_read_write_control)(struct mpc *mpc, const enum MCM_LUT_ID id,
bool lut_bank_a, bool enabled, int mpcc_id);
- void (*program_lut_mode)(struct mpc *mpc, const enum MCM_LUT_XABLE xable,
- bool lut_bank_a, int mpcc_id);
- void (*program_3dlut_size)(struct mpc *mpc, uint32_t width, int mpcc_id);
+ void (*program_lut_mode)(struct mpc *mpc,
+ bool enable,
+ bool lut_bank_a,
+ int mpcc_id);
+ void (*program_3dlut_size)(struct mpc *mpc, const enum dc_cm_lut_size size, int mpcc_id);
void (*program_bias_scale)(struct mpc *mpc, uint16_t bias, uint16_t scale, int mpcc_id);
void (*program_bit_depth)(struct mpc *mpc, uint16_t bit_depth, int mpcc_id);
bool (*is_config_supported)(uint32_t width);
diff --git a/drivers/gpu/drm/amd/display/dc/inc/resource.h b/drivers/gpu/drm/amd/display/dc/inc/resource.h
index 79746d931471..cecd3282a29f 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/resource.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/resource.h
@@ -60,6 +60,7 @@ struct resource_caps {
int num_hpo_dp_stream_encoder;
int num_hpo_dp_link_encoder;
int num_mpc_3dlut;
+ int num_rmcm;
};
struct resource_straps {
diff --git a/drivers/gpu/drm/amd/display/dc/link/link_detection.c b/drivers/gpu/drm/amd/display/dc/link/link_detection.c
index d0bb26888f4b..f992c2d16748 100644
--- a/drivers/gpu/drm/amd/display/dc/link/link_detection.c
+++ b/drivers/gpu/drm/amd/display/dc/link/link_detection.c
@@ -352,7 +352,7 @@ static void query_dp_dual_mode_adaptor(
*dongle = DISPLAY_DONGLE_DP_DVI_DONGLE;
sink_cap->max_hdmi_pixel_clock = DP_ADAPTOR_DVI_MAX_TMDS_CLK;
- CONN_DATA_DETECT(ddc->link, type2_dongle_buf, sizeof(type2_dongle_buf),
+ CONN_DATA_DETECT(link, type2_dongle_buf, sizeof(type2_dongle_buf),
"DP-DVI passive dongle %dMhz: ",
DP_ADAPTOR_DVI_MAX_TMDS_CLK / 1000);
return;
@@ -657,8 +657,6 @@ static bool wait_for_entering_dp_alt_mode(struct dc_link *link)
unsigned long long time_taken_in_ns;
int tries_taken;
- DC_LOGGER_INIT(link->ctx->logger);
-
/**
* this function will only exist if we are on dcn21 (is_in_alt_mode is a
* function pointer, so checking to see if it is equal to 0 is the same
@@ -729,8 +727,6 @@ static void revert_dpia_mst_dsc_always_on_wa(struct dc_link *link)
static bool discover_dp_mst_topology(struct dc_link *link, enum dc_detect_reason reason)
{
- DC_LOGGER_INIT(link->ctx->logger);
-
LINK_INFO("link=%d, mst branch is now Connected\n",
link->link_index);
@@ -750,8 +746,6 @@ static bool discover_dp_mst_topology(struct dc_link *link, enum dc_detect_reason
bool link_reset_cur_dp_mst_topology(struct dc_link *link)
{
- DC_LOGGER_INIT(link->ctx->logger);
-
LINK_INFO("link=%d, mst branch is now Disconnected\n",
link->link_index);
@@ -977,8 +971,6 @@ static bool detect_link_and_local_sink(struct dc_link *link,
enum dc_connection_type new_connection_type = dc_connection_none;
const uint32_t post_oui_delay = 30; // 30ms
- DC_LOGGER_INIT(link->ctx->logger);
-
if (dc_is_virtual_signal(link->connector_signal))
return false;
@@ -1459,8 +1451,6 @@ bool link_detect(struct dc_link *link, enum dc_detect_reason reason)
bool is_delegated_to_mst_top_mgr = false;
enum dc_connection_type pre_link_type = link->type;
- DC_LOGGER_INIT(link->ctx->logger);
-
is_local_sink_detect_success = detect_link_and_local_sink(link, reason);
if (is_local_sink_detect_success && link->local_sink)
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_irq_handler.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_irq_handler.c
index cc18a3bebef2..1860d44f63c1 100644
--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_irq_handler.c
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_irq_handler.c
@@ -223,9 +223,10 @@ static void handle_hpd_irq_vesa_replay_sink(struct dc_link *link)
}
}
-static void handle_hpd_irq_replay_sink(struct dc_link *link)
+static void handle_hpd_irq_replay_sink(struct dc_link *link, bool *need_re_enable)
{
union dpcd_replay_configuration replay_configuration = {0};
+ union dpcd_replay_configuration replay_sink_status = {0};
/*AMD Replay version reuse DP_PSR_ERROR_STATUS for REPLAY_ERROR status.*/
union psr_error_status replay_error_status = {0};
bool ret = false;
@@ -265,9 +266,17 @@ static void handle_hpd_irq_replay_sink(struct dc_link *link)
&replay_error_status.raw,
sizeof(replay_error_status.raw));
+ dm_helpers_dp_read_dpcd(
+ link->ctx,
+ link,
+ DP_PR_REPLAY_SINK_STATUS,
+ &replay_sink_status.raw,
+ 1);
+
if (replay_error_status.bits.LINK_CRC_ERROR ||
replay_configuration.bits.DESYNC_ERROR_STATUS ||
- replay_configuration.bits.STATE_TRANSITION_ERROR_STATUS) {
+ replay_configuration.bits.STATE_TRANSITION_ERROR_STATUS ||
+ replay_sink_status.bits.SINK_DEVICE_REPLAY_STATUS == 0x7) {
bool allow_active;
link->replay_settings.config.replay_error_status.raw |= replay_error_status.raw;
@@ -299,8 +308,7 @@ static void handle_hpd_irq_replay_sink(struct dc_link *link)
if (link->replay_settings.replay_allow_active) {
allow_active = false;
edp_set_replay_allow_active(link, &allow_active, true, false, NULL);
- allow_active = true;
- edp_set_replay_allow_active(link, &allow_active, true, false, NULL);
+ *need_re_enable = true;
}
}
}
@@ -460,6 +468,7 @@ bool dp_handle_hpd_rx_irq(struct dc_link *link,
union device_service_irq device_service_clear = {0};
enum dc_status result;
bool status = false;
+ bool replay_re_enable_needed = false;
if (out_link_loss)
*out_link_loss = false;
@@ -519,7 +528,7 @@ bool dp_handle_hpd_rx_irq(struct dc_link *link,
/* PSR-related error was detected and handled */
return true;
- handle_hpd_irq_replay_sink(link);
+ handle_hpd_irq_replay_sink(link, &replay_re_enable_needed);
/* If PSR-related error handled, Main link may be off,
* so do not handle as a normal sink status change interrupt.
@@ -538,16 +547,16 @@ bool dp_handle_hpd_rx_irq(struct dc_link *link,
return false;
}
- /* For now we only handle 'Downstream port status' case.
+ /* Handle 'Downstream port status' case for all DP link types.
* If we got sink count changed it means
* Downstream port status changed,
* then DM should call DC to do the detection.
- * NOTE: Do not handle link loss on eDP since it is internal link
+ * NOTE: Now includes eDP link loss detection and retraining
*/
- if ((link->connector_signal != SIGNAL_TYPE_EDP) &&
- dp_parse_link_loss_status(
- link,
- &hpd_irq_dpcd_data)) {
+
+ if (dp_parse_link_loss_status(
+ link,
+ &hpd_irq_dpcd_data)) {
/* Connectivity log: link loss */
CONN_DATA_LINK_LOSS(link,
hpd_irq_dpcd_data.raw,
@@ -576,6 +585,11 @@ bool dp_handle_hpd_rx_irq(struct dc_link *link,
!= link->dpcd_sink_count)
status = true;
+ if (replay_re_enable_needed) {
+ bool allow_active = true;
+
+ edp_set_replay_allow_active(link, &allow_active, true, false, NULL);
+ }
/* reasons for HPD RX:
* 1. Link Loss - ie Re-train the Link
* 2. MST sideband message
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_panel_replay.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_panel_replay.c
index 6661078c0241..96afce4ffbfa 100644
--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_panel_replay.c
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_panel_replay.c
@@ -252,23 +252,24 @@ bool dp_pr_enable(struct dc_link *link, bool enable)
if (!dp_pr_get_panel_inst(dc, link, &panel_inst))
return false;
+ if (link->replay_settings.replay_allow_active == enable)
+ return true;
+
if (enable && !dc_is_embedded_signal(link->connector_signal))
dp_pr_set_static_screen_param(link);
- if (link->replay_settings.replay_allow_active != enable) {
- //for sending PR enable commands to DMUB
- memset(&cmd, 0, sizeof(cmd));
+ // for sending PR enable commands to DMUB
+ memset(&cmd, 0, sizeof(cmd));
- cmd.pr_enable.header.type = DMUB_CMD__PR;
- cmd.pr_enable.header.sub_type = DMUB_CMD__PR_ENABLE;
- cmd.pr_enable.header.payload_bytes = sizeof(struct dmub_cmd_pr_enable_data);
- cmd.pr_enable.data.panel_inst = panel_inst;
- cmd.pr_enable.data.enable = enable ? 1 : 0;
+ cmd.pr_enable.header.type = DMUB_CMD__PR;
+ cmd.pr_enable.header.sub_type = DMUB_CMD__PR_ENABLE;
+ cmd.pr_enable.header.payload_bytes = sizeof(struct dmub_cmd_pr_enable_data);
+ cmd.pr_enable.data.panel_inst = panel_inst;
+ cmd.pr_enable.data.enable = enable ? 1 : 0;
+ dc_wake_and_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
- dc_wake_and_execute_dmub_cmd(dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
+ link->replay_settings.replay_allow_active = enable;
- link->replay_settings.replay_allow_active = enable;
- }
return true;
}
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.c
index 5b2c1a4911cf..4a2699a374b7 100644
--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.c
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.c
@@ -847,6 +847,7 @@ bool edp_setup_psr(struct dc_link *link,
case FAMILY_YELLOW_CARP:
case AMDGPU_FAMILY_GC_10_3_6:
case AMDGPU_FAMILY_GC_11_0_1:
+ case AMDGPU_FAMILY_GC_11_5_4:
if (dc->debug.disable_z10 || dc->debug.psr_skip_crtc_disable)
psr_context->psr_level.bits.SKIP_CRTC_DISABLE = true;
break;
@@ -1094,8 +1095,6 @@ bool edp_send_replay_cmd(struct dc_link *link,
if (!replay)
return false;
- DC_LOGGER_INIT(link->ctx->logger);
-
if (dp_pr_get_panel_inst(dc, link, &panel_inst))
cmd_data->panel_inst = panel_inst;
else {
diff --git a/drivers/gpu/drm/amd/display/dc/mpc/dcn10/dcn10_mpc.c b/drivers/gpu/drm/amd/display/dc/mpc/dcn10/dcn10_mpc.c
index b23c64004dd5..27e653234850 100644
--- a/drivers/gpu/drm/amd/display/dc/mpc/dcn10/dcn10_mpc.c
+++ b/drivers/gpu/drm/amd/display/dc/mpc/dcn10/dcn10_mpc.c
@@ -118,9 +118,7 @@ void mpc1_assert_idle_mpcc(struct mpc *mpc, int id)
struct mpcc *mpc1_get_mpcc(struct mpc *mpc, int mpcc_id)
{
- struct dcn10_mpc *mpc10 = TO_DCN10_MPC(mpc);
-
- ASSERT(mpcc_id < mpc10->num_mpcc);
+ ASSERT(mpcc_id < TO_DCN10_MPC(mpc)->num_mpcc);
return &(mpc->mpcc_array[mpcc_id]);
}
diff --git a/drivers/gpu/drm/amd/display/dc/mpc/dcn401/dcn401_mpc.c b/drivers/gpu/drm/amd/display/dc/mpc/dcn401/dcn401_mpc.c
index ce1ee2062e41..e0617db2d0c1 100644
--- a/drivers/gpu/drm/amd/display/dc/mpc/dcn401/dcn401_mpc.c
+++ b/drivers/gpu/drm/amd/display/dc/mpc/dcn401/dcn401_mpc.c
@@ -73,56 +73,15 @@ void mpc401_set_movable_cm_location(struct mpc *mpc, enum mpcc_movable_cm_locati
}
}
-static enum dc_lut_mode get3dlut_config(
- struct mpc *mpc,
- bool *is_17x17x17,
- bool *is_12bits_color_channel,
- int mpcc_id)
-{
- uint32_t i_mode, i_enable_10bits, lut_size;
- enum dc_lut_mode mode;
- struct dcn401_mpc *mpc401 = TO_DCN401_MPC(mpc);
-
- REG_GET(MPCC_MCM_3DLUT_MODE[mpcc_id],
- MPCC_MCM_3DLUT_MODE_CURRENT, &i_mode);
-
- REG_GET(MPCC_MCM_3DLUT_READ_WRITE_CONTROL[mpcc_id],
- MPCC_MCM_3DLUT_30BIT_EN, &i_enable_10bits);
-
- switch (i_mode) {
- case 0:
- mode = LUT_BYPASS;
- break;
- case 1:
- mode = LUT_RAM_A;
- break;
- case 2:
- mode = LUT_RAM_B;
- break;
- default:
- mode = LUT_BYPASS;
- break;
- }
- if (i_enable_10bits > 0)
- *is_12bits_color_channel = false;
- else
- *is_12bits_color_channel = true;
-
- REG_GET(MPCC_MCM_3DLUT_MODE[mpcc_id], MPCC_MCM_3DLUT_SIZE, &lut_size);
-
- if (lut_size == 0)
- *is_17x17x17 = true;
- else
- *is_17x17x17 = false;
-
- return mode;
-}
-
-void mpc401_populate_lut(struct mpc *mpc, const enum MCM_LUT_ID id, const union mcm_lut_params params, bool lut_bank_a, int mpcc_id)
+void mpc401_populate_lut(struct mpc *mpc,
+ const enum MCM_LUT_ID id,
+ const union mcm_lut_params *params,
+ const bool lut_bank_a,
+ const int mpcc_id)
{
const enum dc_lut_mode next_mode = lut_bank_a ? LUT_RAM_A : LUT_RAM_B;
- const struct pwl_params *lut1d = params.pwl;
- const struct pwl_params *lut_shaper = params.pwl;
+ const struct pwl_params *lut1d = params->pwl;
+ const struct pwl_params *lut_shaper = params->pwl;
bool is_17x17x17;
bool is_12bits_color_channel;
const struct dc_rgb *lut0;
@@ -131,7 +90,7 @@ void mpc401_populate_lut(struct mpc *mpc, const enum MCM_LUT_ID id, const union
const struct dc_rgb *lut3;
int lut_size0;
int lut_size;
- const struct tetrahedral_params *lut3d = params.lut3d;
+ const struct tetrahedral_params *lut3d = params->lut3d;
switch (id) {
case MCM_LUT_1DLUT:
@@ -174,8 +133,6 @@ void mpc401_populate_lut(struct mpc *mpc, const enum MCM_LUT_ID id, const union
mpc32_power_on_shaper_3dlut(mpc, mpcc_id, true);
- get3dlut_config(mpc, &is_17x17x17, &is_12bits_color_channel, mpcc_id);
-
is_17x17x17 = !lut3d->use_tetrahedral_9;
is_12bits_color_channel = lut3d->use_12bits;
if (is_17x17x17) {
@@ -198,8 +155,6 @@ void mpc401_populate_lut(struct mpc *mpc, const enum MCM_LUT_ID id, const union
sizeof(lut3d->tetrahedral_9.lut1[0]);
}
- mpc32_select_3dlut_ram(mpc, next_mode,
- is_12bits_color_channel, mpcc_id);
mpc32_select_3dlut_ram_mask(mpc, 0x1, mpcc_id);
if (is_12bits_color_channel)
mpc32_set3dlut_ram12(mpc, lut0, lut_size0, mpcc_id);
@@ -232,46 +187,69 @@ void mpc401_populate_lut(struct mpc *mpc, const enum MCM_LUT_ID id, const union
}
+static uint32_t mpc401_cm_lut_size_to_3dlut_size(const enum dc_cm_lut_size cm_size)
+{
+ uint32_t size = 0;
+
+ switch (cm_size) {
+ case CM_LUT_SIZE_999:
+ size = 1;
+ break;
+ case CM_LUT_SIZE_171717:
+ size = 0;
+ break;
+ default:
+ /* invalid LUT size */
+ ASSERT(false);
+ size = 0;
+ break;
+ }
+
+ return size;
+}
+
void mpc401_program_lut_mode(
struct mpc *mpc,
const enum MCM_LUT_ID id,
- const enum MCM_LUT_XABLE xable,
- bool lut_bank_a,
- int mpcc_id)
+ const bool enable,
+ const bool lut_bank_a,
+ const enum dc_cm_lut_size size,
+ const int mpcc_id)
{
+ uint32_t lut_size;
struct dcn401_mpc *mpc401 = TO_DCN401_MPC(mpc);
switch (id) {
case MCM_LUT_3DLUT:
- switch (xable) {
- case MCM_LUT_DISABLE:
+ if (enable) {
+ lut_size = mpc401_cm_lut_size_to_3dlut_size(size);
+ REG_UPDATE_2(MPCC_MCM_3DLUT_MODE[mpcc_id],
+ MPCC_MCM_3DLUT_MODE, lut_bank_a ? 1 : 2,
+ MPCC_MCM_3DLUT_SIZE, lut_size);
+ } else {
+ if (mpc->ctx->dc->debug.enable_mem_low_power.bits.mpc)
+ mpc32_power_on_shaper_3dlut(mpc, mpcc_id, false);
REG_UPDATE(MPCC_MCM_3DLUT_MODE[mpcc_id], MPCC_MCM_3DLUT_MODE, 0);
- break;
- case MCM_LUT_ENABLE:
- REG_UPDATE(MPCC_MCM_3DLUT_MODE[mpcc_id], MPCC_MCM_3DLUT_MODE, lut_bank_a ? 1 : 2);
- break;
}
break;
case MCM_LUT_SHAPER:
- switch (xable) {
- case MCM_LUT_DISABLE:
- REG_UPDATE(MPCC_MCM_SHAPER_CONTROL[mpcc_id], MPCC_MCM_SHAPER_LUT_MODE, 0);
- break;
- case MCM_LUT_ENABLE:
+ if (enable) {
REG_UPDATE(MPCC_MCM_SHAPER_CONTROL[mpcc_id], MPCC_MCM_SHAPER_LUT_MODE, lut_bank_a ? 1 : 2);
- break;
+ } else {
+ if (mpc->ctx->dc->debug.enable_mem_low_power.bits.mpc)
+ mpc32_power_on_shaper_3dlut(mpc, mpcc_id, false);
+ REG_UPDATE(MPCC_MCM_SHAPER_CONTROL[mpcc_id], MPCC_MCM_SHAPER_LUT_MODE, 0);
}
break;
case MCM_LUT_1DLUT:
- switch (xable) {
- case MCM_LUT_DISABLE:
- REG_UPDATE(MPCC_MCM_1DLUT_CONTROL[mpcc_id],
- MPCC_MCM_1DLUT_MODE, 0);
- break;
- case MCM_LUT_ENABLE:
+ if (enable) {
REG_UPDATE(MPCC_MCM_1DLUT_CONTROL[mpcc_id],
MPCC_MCM_1DLUT_MODE, 2);
- break;
+ } else {
+ if (mpc->ctx->dc->debug.enable_mem_low_power.bits.mpc)
+ mpc32_power_on_blnd_lut(mpc, mpcc_id, false);
+ REG_UPDATE(MPCC_MCM_1DLUT_CONTROL[mpcc_id],
+ MPCC_MCM_1DLUT_MODE, 0);
}
REG_UPDATE(MPCC_MCM_1DLUT_CONTROL[mpcc_id],
MPCC_MCM_1DLUT_SELECT, lut_bank_a ? 0 : 1);
@@ -279,14 +257,20 @@ void mpc401_program_lut_mode(
}
}
-void mpc401_program_lut_read_write_control(struct mpc *mpc, const enum MCM_LUT_ID id, bool lut_bank_a, int mpcc_id)
+void mpc401_program_lut_read_write_control(struct mpc *mpc,
+ const enum MCM_LUT_ID id,
+ const bool lut_bank_a,
+ const unsigned int bit_depth,
+ const int mpcc_id)
{
struct dcn401_mpc *mpc401 = TO_DCN401_MPC(mpc);
switch (id) {
case MCM_LUT_3DLUT:
mpc32_select_3dlut_ram_mask(mpc, 0xf, mpcc_id);
- REG_UPDATE(MPCC_MCM_3DLUT_READ_WRITE_CONTROL[mpcc_id], MPCC_MCM_3DLUT_RAM_SEL, lut_bank_a ? 0 : 1);
+ REG_UPDATE_2(MPCC_MCM_3DLUT_READ_WRITE_CONTROL[mpcc_id],
+ MPCC_MCM_3DLUT_30BIT_EN, (bit_depth == 10) ? 1 : 0,
+ MPCC_MCM_3DLUT_RAM_SEL, lut_bank_a ? 0 : 1);
break;
case MCM_LUT_SHAPER:
mpc32_configure_shaper_lut(mpc, lut_bank_a, mpcc_id);
@@ -578,6 +562,44 @@ void mpc401_get_gamut_remap(struct mpc *mpc,
arr_reg_val, ARRAY_SIZE(arr_reg_val));
}
+void mpc401_get_lut_mode(struct mpc *mpc,
+ const enum MCM_LUT_ID id,
+ const int mpcc_id,
+ bool *enable,
+ bool *lut_bank_a)
+{
+ struct dcn401_mpc *mpc401 = TO_DCN401_MPC(mpc);
+
+ uint32_t lut_mode = 0;
+ uint32_t lut_select = 0;
+
+ *enable = false;
+ *lut_bank_a = true;
+
+ switch (id) {
+ case MCM_LUT_SHAPER:
+ REG_GET(MPCC_MCM_SHAPER_CONTROL[mpcc_id],
+ MPCC_MCM_SHAPER_MODE_CURRENT, &lut_mode);
+ *enable = lut_mode != 0;
+ *lut_bank_a = lut_mode != 2;
+ break;
+ case MCM_LUT_1DLUT:
+ REG_GET_2(MPCC_MCM_1DLUT_CONTROL[mpcc_id],
+ MPCC_MCM_1DLUT_MODE_CURRENT, &lut_mode,
+ MPCC_MCM_1DLUT_SELECT_CURRENT, &lut_select);
+ *enable = lut_mode != 0;
+ *lut_bank_a = lut_mode == 0 || lut_select == 0;
+ break;
+ case MCM_LUT_3DLUT:
+ default:
+ REG_GET(MPCC_MCM_3DLUT_MODE[mpcc_id],
+ MPCC_MCM_3DLUT_MODE_CURRENT, &lut_mode);
+ *enable = lut_mode != 0;
+ *lut_bank_a = lut_mode != 2;
+ break;
+ }
+}
+
static const struct mpc_funcs dcn401_mpc_funcs = {
.read_mpcc_state = mpc1_read_mpcc_state,
.insert_plane = mpc1_insert_plane,
@@ -616,6 +638,7 @@ static const struct mpc_funcs dcn401_mpc_funcs = {
.populate_lut = mpc401_populate_lut,
.program_lut_read_write_control = mpc401_program_lut_read_write_control,
.program_lut_mode = mpc401_program_lut_mode,
+ .get_lut_mode = mpc401_get_lut_mode,
};
diff --git a/drivers/gpu/drm/amd/display/dc/mpc/dcn401/dcn401_mpc.h b/drivers/gpu/drm/amd/display/dc/mpc/dcn401/dcn401_mpc.h
index 6d842d7b95c7..c16560c84453 100644
--- a/drivers/gpu/drm/amd/display/dc/mpc/dcn401/dcn401_mpc.h
+++ b/drivers/gpu/drm/amd/display/dc/mpc/dcn401/dcn401_mpc.h
@@ -206,21 +206,32 @@ void dcn401_mpc_construct(struct dcn401_mpc *mpc401,
int num_rmu);
void mpc401_set_movable_cm_location(struct mpc *mpc, enum mpcc_movable_cm_location location, int mpcc_id);
-void mpc401_populate_lut(struct mpc *mpc, const enum MCM_LUT_ID id, const union mcm_lut_params params,
- bool lut_bank_a, int mpcc_id);
+void mpc401_populate_lut(struct mpc *mpc,
+ const enum MCM_LUT_ID id,
+ const union mcm_lut_params *params,
+ bool lut_bank_a,
+ int mpcc_id);
void mpc401_program_lut_mode(
struct mpc *mpc,
const enum MCM_LUT_ID id,
- const enum MCM_LUT_XABLE xable,
- bool lut_bank_a,
- int mpcc_id);
+ const bool enable,
+ const bool lut_bank_a,
+ const enum dc_cm_lut_size size,
+ const int mpcc_id);
+
+void mpc401_get_lut_mode(struct mpc *mpc,
+ const enum MCM_LUT_ID id,
+ const int mpcc_id,
+ bool *enable,
+ bool *lut_bank_a);
void mpc401_program_lut_read_write_control(
struct mpc *mpc,
const enum MCM_LUT_ID id,
- bool lut_bank_a,
- int mpcc_id);
+ const bool lut_bank_a,
+ const unsigned int bit_depth,
+ const int mpcc_id);
void mpc401_set_gamut_remap(
struct mpc *mpc,
diff --git a/drivers/gpu/drm/amd/display/dc/mpc/dcn42/dcn42_mpc.c b/drivers/gpu/drm/amd/display/dc/mpc/dcn42/dcn42_mpc.c
index 304b23109fb0..507dbdbea600 100644
--- a/drivers/gpu/drm/amd/display/dc/mpc/dcn42/dcn42_mpc.c
+++ b/drivers/gpu/drm/amd/display/dc/mpc/dcn42/dcn42_mpc.c
@@ -20,7 +20,7 @@
mpc42->mpc_shift->field_name, mpc42->mpc_mask->field_name
-static void mpc42_init_mpcc(struct mpcc *mpcc, int mpcc_inst)
+void mpc42_init_mpcc(struct mpcc *mpcc, int mpcc_inst)
{
mpcc->mpcc_id = mpcc_inst;
mpcc->dpp_id = 0xf;
@@ -63,154 +63,6 @@ void mpc42_update_blending(
mpcc->blnd_cfg = *blnd_cfg;
}
-/* Shaper functions */
-void mpc42_power_on_shaper_3dlut(
- struct mpc *mpc,
- uint32_t mpcc_id,
- bool power_on)
-{
- uint32_t power_status_shaper = 2;
- uint32_t power_status_3dlut = 2;
- struct dcn42_mpc *mpc42 = TO_DCN42_MPC(mpc);
- int max_retries = 10;
-
- REG_SET(MPCC_MCM_MEM_PWR_CTRL[mpcc_id], 0,
- MPCC_MCM_3DLUT_MEM_PWR_DIS, power_on == true ? 1:0);
- REG_SET(MPCC_MCM_MEM_PWR_CTRL[mpcc_id], 0,
- MPCC_MCM_SHAPER_MEM_PWR_DIS, power_on == true ? 1:0);
- /* wait for memory to fully power up */
- if (power_on && mpc->ctx->dc->debug.enable_mem_low_power.bits.mpc) {
- REG_WAIT(MPCC_MCM_MEM_PWR_CTRL[mpcc_id], MPCC_MCM_SHAPER_MEM_PWR_STATE, 0, 1, max_retries);
- REG_WAIT(MPCC_MCM_MEM_PWR_CTRL[mpcc_id], MPCC_MCM_3DLUT_MEM_PWR_STATE, 0, 1, max_retries);
- }
-
- /*read status is not mandatory, it is just for debugging*/
- REG_GET(MPCC_MCM_MEM_PWR_CTRL[mpcc_id], MPCC_MCM_SHAPER_MEM_PWR_STATE, &power_status_shaper);
- REG_GET(MPCC_MCM_MEM_PWR_CTRL[mpcc_id], MPCC_MCM_3DLUT_MEM_PWR_STATE, &power_status_3dlut);
-
- if (power_status_shaper != 0 && power_on == true)
- BREAK_TO_DEBUGGER();
-
- if (power_status_3dlut != 0 && power_on == true)
- BREAK_TO_DEBUGGER();
-}
-
-void mpc42_configure_shaper_lut(
- struct mpc *mpc,
- bool is_ram_a,
- uint32_t mpcc_id)
-{
- struct dcn42_mpc *mpc42 = TO_DCN42_MPC(mpc);
-
- REG_UPDATE(MPCC_MCM_SHAPER_SCALE_G_B[mpcc_id],
- MPCC_MCM_SHAPER_SCALE_B, 0x7000);
- REG_UPDATE(MPCC_MCM_SHAPER_SCALE_G_B[mpcc_id],
- MPCC_MCM_SHAPER_SCALE_G, 0x7000);
- REG_UPDATE(MPCC_MCM_SHAPER_SCALE_R[mpcc_id],
- MPCC_MCM_SHAPER_SCALE_R, 0x7000);
- REG_UPDATE(MPCC_MCM_SHAPER_LUT_WRITE_EN_MASK[mpcc_id],
- MPCC_MCM_SHAPER_LUT_WRITE_EN_MASK, 7);
- REG_UPDATE(MPCC_MCM_SHAPER_LUT_WRITE_EN_MASK[mpcc_id],
- MPCC_MCM_SHAPER_LUT_WRITE_SEL, is_ram_a == true ? 0:1);
- REG_SET(MPCC_MCM_SHAPER_LUT_INDEX[mpcc_id], 0, MPCC_MCM_SHAPER_LUT_INDEX, 0);
-}
-
-
-void mpc42_program_3dlut_size(struct mpc *mpc, uint32_t width, int mpcc_id)
-{
- struct dcn42_mpc *mpc42 = TO_DCN42_MPC(mpc);
- uint32_t size = 0xff;
-
- REG_GET(MPCC_MCM_3DLUT_MODE[mpcc_id], MPCC_MCM_3DLUT_SIZE, &size);
-
- REG_UPDATE(MPCC_MCM_3DLUT_MODE[mpcc_id], MPCC_MCM_3DLUT_SIZE,
- (width == 33) ? 2 :
- (width == 17) ? 0 : 2);
-
- REG_GET(MPCC_MCM_3DLUT_MODE[mpcc_id], MPCC_MCM_3DLUT_SIZE, &size);
-}
-
-void mpc42_program_3dlut_fl_bias_scale(struct mpc *mpc, uint16_t bias, uint16_t scale, int mpcc_id)
-{
- struct dcn42_mpc *mpc42 = TO_DCN42_MPC(mpc);
-
- REG_UPDATE_2(MPCC_MCM_3DLUT_OUT_OFFSET_R[mpcc_id],
- MPCC_MCM_3DLUT_OUT_OFFSET_R, bias,
- MPCC_MCM_3DLUT_OUT_SCALE_R, scale);
-
- REG_UPDATE_2(MPCC_MCM_3DLUT_OUT_OFFSET_G[mpcc_id],
- MPCC_MCM_3DLUT_OUT_OFFSET_G, bias,
- MPCC_MCM_3DLUT_OUT_SCALE_G, scale);
-
- REG_UPDATE_2(MPCC_MCM_3DLUT_OUT_OFFSET_B[mpcc_id],
- MPCC_MCM_3DLUT_OUT_OFFSET_B, bias,
- MPCC_MCM_3DLUT_OUT_SCALE_B, scale);
-}
-
-void mpc42_program_bit_depth(struct mpc *mpc, uint16_t bit_depth, int mpcc_id)
-{
- struct dcn42_mpc *mpc42 = TO_DCN42_MPC(mpc);
-
- REG_UPDATE(MPCC_MCM_3DLUT_READ_WRITE_CONTROL[mpcc_id], MPCC_MCM_3DLUT_WRITE_EN_MASK, 0xF);
-
- //program bit_depth
- REG_UPDATE(MPCC_MCM_3DLUT_READ_WRITE_CONTROL[mpcc_id],
- MPCC_MCM_3DLUT_30BIT_EN,
- (bit_depth == 10) ? 1 : 0);
-}
-
-bool mpc42_is_config_supported(uint32_t width)
-{
- if (width == 17)
- return true;
-
- return false;
-}
-
-void mpc42_populate_lut(struct mpc *mpc, const union mcm_lut_params params,
- bool lut_bank_a, int mpcc_id)
-{
- const enum dc_lut_mode next_mode = lut_bank_a ? LUT_RAM_A : LUT_RAM_B;
- const struct pwl_params *lut_shaper = params.pwl;
-
- if (lut_shaper == NULL)
- return;
- if (mpc->ctx->dc->debug.enable_mem_low_power.bits.mpc)
- mpc42_power_on_shaper_3dlut(mpc, mpcc_id, true);
-
- mpc42_configure_shaper_lut(mpc, next_mode == LUT_RAM_A, mpcc_id);
-
- if (next_mode == LUT_RAM_A)
- mpc32_program_shaper_luta_settings(mpc, lut_shaper, mpcc_id);
- else
- mpc32_program_shaper_lutb_settings(mpc, lut_shaper, mpcc_id);
-
- mpc32_program_shaper_lut(
- mpc, lut_shaper->rgb_resulted, lut_shaper->hw_points_num, mpcc_id);
-
- mpc42_power_on_shaper_3dlut(mpc, mpcc_id, false);
-}
-
-void mpc42_program_lut_read_write_control(struct mpc *mpc, const enum MCM_LUT_ID id,
- bool lut_bank_a, bool enabled, int mpcc_id)
-{
- struct dcn42_mpc *mpc42 = TO_DCN42_MPC(mpc);
-
- switch (id) {
- case MCM_LUT_3DLUT:
- REG_UPDATE(MPCC_MCM_3DLUT_MODE[mpcc_id], MPCC_MCM_3DLUT_MODE,
- (!enabled) ? 0 :
- (lut_bank_a) ? 1 : 2);
- REG_UPDATE(MPCC_MCM_3DLUT_READ_WRITE_CONTROL[mpcc_id], MPCC_MCM_3DLUT_RAM_SEL, lut_bank_a ? 0 : 1);
- break;
- case MCM_LUT_SHAPER:
- mpc32_configure_shaper_lut(mpc, lut_bank_a, mpcc_id);
- break;
- default:
- break;
- }
-}
-
/* RMCM Shaper functions */
void mpc42_power_on_rmcm_shaper_3dlut(
struct mpc *mpc,
@@ -674,32 +526,47 @@ void mpc42_program_rmcm_lut_read_write_control(struct mpc *mpc, const enum MCM_L
}
}
-void mpc42_program_lut_mode(struct mpc *mpc, const enum MCM_LUT_XABLE xable,
- bool lut_bank_a, int mpcc_id)
+void mpc42_program_lut_mode(struct mpc *mpc,
+ bool enable,
+ bool lut_bank_a,
+ int mpcc_id)
{
struct dcn42_mpc *mpc42 = TO_DCN42_MPC(mpc);
- switch (xable) {
- case MCM_LUT_DISABLE:
+ if (enable) {
+ REG_UPDATE(MPC_RMCM_SHAPER_CONTROL[mpcc_id], MPC_RMCM_SHAPER_LUT_MODE, lut_bank_a ? 1 : 2);
+ } else {
REG_UPDATE(MPC_RMCM_SHAPER_CONTROL[mpcc_id], MPC_RMCM_SHAPER_LUT_MODE, 0);
+ }
+}
+
+static uint32_t mpc42_get_rmcm_3dlut_width(
+ const enum dc_cm_lut_size size)
+{
+ uint32_t width = 0;
+
+ switch (size) {
+ case CM_LUT_SIZE_333333:
+ width = 2;
break;
- case MCM_LUT_ENABLE:
- REG_UPDATE(MPC_RMCM_SHAPER_CONTROL[mpcc_id], MPC_RMCM_SHAPER_LUT_MODE, lut_bank_a ? 1 : 2);
+ case CM_LUT_SIZE_171717:
+ default:
+ width = 0;
break;
}
+
+ return width;
}
-void mpc42_program_rmcm_3dlut_size(struct mpc *mpc, uint32_t width, int mpcc_id)
+void mpc42_program_rmcm_3dlut_size(struct mpc *mpc,
+ const enum dc_cm_lut_size size,
+ int mpcc_id)
{
struct dcn42_mpc *mpc42 = TO_DCN42_MPC(mpc);
- uint32_t size = 0xff;
-
- REG_GET(MPC_RMCM_3DLUT_MODE[mpcc_id], MPC_RMCM_3DLUT_SIZE, &size);
+ uint32_t width = mpc42_get_rmcm_3dlut_width(size);
- REG_UPDATE(MPC_RMCM_3DLUT_MODE[mpcc_id], MPC_RMCM_3DLUT_SIZE,
- (width == 33) ? 2 : 0);
-
- REG_GET(MPC_RMCM_3DLUT_MODE[mpcc_id], MPC_RMCM_3DLUT_SIZE, &size);
+ REG_UPDATE(MPC_RMCM_3DLUT_MODE[mpcc_id],
+ MPC_RMCM_3DLUT_SIZE, width);
}
void mpc42_program_rmcm_3dlut_fast_load_bias_scale(struct mpc *mpc, uint16_t bias, uint16_t scale, int mpcc_id)
@@ -731,14 +598,6 @@ void mpc42_program_rmcm_bit_depth(struct mpc *mpc, uint16_t bit_depth, int mpcc_
(bit_depth == 10) ? 1 : 0);
}
-bool mpc42_is_rmcm_config_supported(uint32_t width)
-{
- if (width == 17 || width == 33)
- return true;
-
- return false;
-}
-
void mpc42_set_fl_config(
struct mpc *mpc,
struct mpc_fl_3dlut_config *cfg,
@@ -746,6 +605,7 @@ void mpc42_set_fl_config(
{
struct dcn42_mpc *mpc42 = TO_DCN42_MPC(mpc);
+ uint32_t width = mpc42_get_rmcm_3dlut_width(cfg->size);
/*
From: Jie Zhou
@@ -786,7 +646,7 @@ void mpc42_set_fl_config(
//width
REG_UPDATE_2(MPC_RMCM_3DLUT_MODE[mpcc_id],
- MPC_RMCM_3DLUT_SIZE, (cfg->width == 33) ? 2 : 0,
+ MPC_RMCM_3DLUT_SIZE, width,
MPC_RMCM_3DLUT_MODE, (!cfg->enabled) ? 0 : (cfg->select_lut_bank_a) ? 1 : 2);
//connect to hubp
@@ -799,182 +659,6 @@ void mpc42_set_fl_config(
REG_UPDATE(MPC_RMCM_CNTL[mpcc_id], MPC_RMCM_CNTL, cfg->enabled ? 0 : 0xF);
}
-//static void rmcm_program_gamut_remap(
-// struct mpc *mpc,
-// unsigned int mpcc_id,
-// const uint16_t *regval,
-// enum mpcc_gamut_remap_id gamut_remap_block_id,
-// enum mpcc_gamut_remap_mode_select mode_select)
-//{
-// struct color_matrices_reg gamut_regs;
-// struct dcn42_mpc *mpc42 = TO_DCN42_MPC(mpc);
-//
-// if (gamut_remap_block_id == MPCC_OGAM_GAMUT_REMAP ||
-// gamut_remap_block_id == MPCC_MCM_FIRST_GAMUT_REMAP ||
-// gamut_remap_block_id == MPCC_MCM_SECOND_GAMUT_REMAP) {
-// mpc_program_gamut_remap(mpc, mpcc_id, regval, gamut_remap_block_id, mode_select);
-// return;
-// }
-// if (gamut_remap_block_id == MPCC_OGAM_GAMUT_REMAP) {
-//
-// if (regval == NULL || mode_select == MPCC_GAMUT_REMAP_MODE_SELECT_0) {
-// REG_SET(MPC_RMCM_GAMUT_REMAP_MODE[mpcc_id], 0,
-// MPC_RMCM_GAMUT_REMAP_MODE, mode_select);
-// return;
-// }
-//
-// gamut_regs.shifts.csc_c11 = mpc42->mpc_shift->MPCC_GAMUT_REMAP_C11_A;
-// gamut_regs.masks.csc_c11 = mpc42->mpc_mask->MPCC_GAMUT_REMAP_C11_A;
-// gamut_regs.shifts.csc_c12 = mpc42->mpc_shift->MPCC_GAMUT_REMAP_C12_A;
-// gamut_regs.masks.csc_c12 = mpc42->mpc_mask->MPCC_GAMUT_REMAP_C12_A;
-//
-// switch (mode_select) {
-// case MPCC_GAMUT_REMAP_MODE_SELECT_1:
-// gamut_regs.csc_c11_c12 = REG(MPC_RMCM_GAMUT_REMAP_C11_C12_A[mpcc_id]);
-// gamut_regs.csc_c33_c34 = REG(MPC_RMCM_GAMUT_REMAP_C33_C34_A[mpcc_id]);
-// break;
-// case MPCC_GAMUT_REMAP_MODE_SELECT_2:
-// gamut_regs.csc_c11_c12 = REG(MPC_RMCM_GAMUT_REMAP_C11_C12_B[mpcc_id]);
-// gamut_regs.csc_c33_c34 = REG(MPC_RMCM_GAMUT_REMAP_C33_C34_B[mpcc_id]);
-// break;
-// default:
-// break;
-// }
-//
-// cm_helper_program_color_matrices(
-// mpc->ctx,
-// regval,
-// &gamut_regs);
-//
-// //select coefficient set to use, set A (MODE_1) or set B (MODE_2)
-// REG_SET(MPC_RMCM_GAMUT_REMAP_MODE[mpcc_id], 0, MPC_RMCM_GAMUT_REMAP_MODE, mode_select);
-// }
-//}
-
-//static bool is_mpc_legacy_gamut_id(enum mpcc_gamut_remap_id gamut_remap_block_id)
-//{
-// if (gamut_remap_block_id == MPCC_OGAM_GAMUT_REMAP ||
-// gamut_remap_block_id == MPCC_MCM_FIRST_GAMUT_REMAP ||
-// gamut_remap_block_id == MPCC_MCM_SECOND_GAMUT_REMAP) {
-// return true;
-// }
-// return false;
-//}
-//static void program_gamut_remap(
-// struct mpc *mpc,
-// unsigned int mpcc_id,
-// const uint16_t *regval,
-// enum mpcc_gamut_remap_id gamut_remap_block_id,
-// enum mpcc_gamut_remap_mode_select mode_select)
-//{
-// if (is_mpc_legacy_gamut_id(gamut_remap_block_id))
-// mpc_program_gamut_remap(mpc, mpcc_id, regval, gamut_remap_block_id, mode_select);
-// else
-// rmcm_program_gamut_remap(mpc, mpcc_id, regval, gamut_remap_block_id, mode_select);
-//}
-
-//void mpc42_set_gamut_remap(
-// struct mpc *mpc,
-// int mpcc_id,
-// const struct mpc_grph_gamut_adjustment *adjust)
-//{
-// struct dcn42_mpc *mpc42 = TO_DCN42_MPC(mpc);
-// unsigned int i = 0;
-// uint32_t mode_select = 0;
-//
-// if (adjust->gamut_adjust_type != GRAPHICS_GAMUT_ADJUST_TYPE_SW) {
-// /* Bypass / Disable if type is bypass or hw */
-// program_gamut_remap(mpc, mpcc_id, NULL,
-// adjust->mpcc_gamut_remap_block_id, MPCC_GAMUT_REMAP_MODE_SELECT_0);
-// } else {
-// struct fixed31_32 arr_matrix[12];
-// uint16_t arr_reg_val[12];
-//
-// for (i = 0; i < 12; i++)
-// arr_matrix[i] = adjust->temperature_matrix[i];
-//
-// convert_float_matrix(arr_reg_val, arr_matrix, 12);
-//
-// if (is_mpc_legacy_gamut_id(adjust->mpcc_gamut_remap_block_id))
-// REG_GET(MPCC_GAMUT_REMAP_MODE[mpcc_id],
-// MPCC_GAMUT_REMAP_MODE_CURRENT, &mode_select);
-// else
-// REG_GET(MPC_RMCM_GAMUT_REMAP_MODE[mpcc_id],
-// MPC_RMCM_GAMUT_REMAP_MODE_CURRENT, &mode_select);
-//
-// //If current set in use not set A (MODE_1), then use set A, otherwise use set B
-// if (mode_select != MPCC_GAMUT_REMAP_MODE_SELECT_1)
-// mode_select = MPCC_GAMUT_REMAP_MODE_SELECT_1;
-// else
-// mode_select = MPCC_GAMUT_REMAP_MODE_SELECT_2;
-//
-// program_gamut_remap(mpc, mpcc_id, arr_reg_val,
-// adjust->mpcc_gamut_remap_block_id, mode_select);
-// }
-//}
-
-//static void read_gamut_remap(struct mpc *mpc,
-// int mpcc_id,
-// uint16_t *regval,
-// enum mpcc_gamut_remap_id gamut_remap_block_id,
-// uint32_t *mode_select)
-//{
-// struct color_matrices_reg gamut_regs = {0};
-// struct dcn42_mpc *mpc42 = TO_DCN42_MPC(mpc);
-//
-// if (is_mpc_legacy_gamut_id(gamut_remap_block_id)) {
-// mpc_read_gamut_remap(mpc, mpcc_id, regval, gamut_remap_block_id, mode_select);
-// }
-// if (gamut_remap_block_id == MPCC_RMCM_GAMUT_REMAP) {
-// //current coefficient set in use
-// REG_GET(MPC_RMCM_GAMUT_REMAP_MODE[mpcc_id], MPC_RMCM_GAMUT_REMAP_MODE, mode_select);
-//
-// gamut_regs.shifts.csc_c11 = mpc42->mpc_shift->MPCC_GAMUT_REMAP_C11_A;
-// gamut_regs.masks.csc_c11 = mpc42->mpc_mask->MPCC_GAMUT_REMAP_C11_A;
-// gamut_regs.shifts.csc_c12 = mpc42->mpc_shift->MPCC_GAMUT_REMAP_C12_A;
-// gamut_regs.masks.csc_c12 = mpc42->mpc_mask->MPCC_GAMUT_REMAP_C12_A;
-//
-// switch (*mode_select) {
-// case MPCC_GAMUT_REMAP_MODE_SELECT_1:
-// gamut_regs.csc_c11_c12 = REG(MPC_RMCM_GAMUT_REMAP_C11_C12_A[mpcc_id]);
-// gamut_regs.csc_c33_c34 = REG(MPC_RMCM_GAMUT_REMAP_C33_C34_A[mpcc_id]);
-// break;
-// case MPCC_GAMUT_REMAP_MODE_SELECT_2:
-// gamut_regs.csc_c11_c12 = REG(MPC_RMCM_GAMUT_REMAP_C11_C12_B[mpcc_id]);
-// gamut_regs.csc_c33_c34 = REG(MPC_RMCM_GAMUT_REMAP_C33_C34_B[mpcc_id]);
-// break;
-// default:
-// break;
-// }
-// }
-//
-// if (*mode_select != MPCC_GAMUT_REMAP_MODE_SELECT_0) {
-// cm_helper_read_color_matrices(
-// mpc42->base.ctx,
-// regval,
-// &gamut_regs);
-// }
-//}
-
-//void mpc42_get_gamut_remap(struct mpc *mpc,
-// int mpcc_id,
-// struct mpc_grph_gamut_adjustment *adjust)
-//{
-// uint16_t arr_reg_val[12] = {0};
-// uint32_t mode_select;
-//
-// read_gamut_remap(mpc, mpcc_id, arr_reg_val, adjust->mpcc_gamut_remap_block_id, &mode_select);
-//
-// if (mode_select == MPCC_GAMUT_REMAP_MODE_SELECT_0) {
-// adjust->gamut_adjust_type = GRAPHICS_GAMUT_ADJUST_TYPE_BYPASS;
-// return;
-// }
-//
-// adjust->gamut_adjust_type = GRAPHICS_GAMUT_ADJUST_TYPE_SW;
-// convert_hw_matrix(adjust->temperature_matrix,
-// arr_reg_val, ARRAY_SIZE(arr_reg_val));
-//}
-
void mpc42_read_mpcc_state(
struct mpc *mpc,
int mpcc_inst,
@@ -1071,14 +755,7 @@ static const struct mpc_funcs dcn42_mpc_funcs = {
.populate_lut = mpc401_populate_lut,
.program_lut_read_write_control = mpc401_program_lut_read_write_control,
.program_lut_mode = mpc401_program_lut_mode,
- .mcm = {
- .program_lut_read_write_control = mpc42_program_lut_read_write_control,
- .program_3dlut_size = mpc42_program_3dlut_size,
- .program_bias_scale = mpc42_program_3dlut_fl_bias_scale,
- .program_bit_depth = mpc42_program_bit_depth,
- .is_config_supported = mpc42_is_config_supported,
- .populate_lut = mpc42_populate_lut,
- },
+ .get_lut_mode = mpc401_get_lut_mode,
.rmcm = {
.enable_3dlut_fl = mpc42_enable_3dlut_fl,
.update_3dlut_fast_load_select = mpc42_update_3dlut_fast_load_select,
@@ -1087,7 +764,6 @@ static const struct mpc_funcs dcn42_mpc_funcs = {
.program_3dlut_size = mpc42_program_rmcm_3dlut_size,
.program_bias_scale = mpc42_program_rmcm_3dlut_fast_load_bias_scale,
.program_bit_depth = mpc42_program_rmcm_bit_depth,
- .is_config_supported = mpc42_is_rmcm_config_supported,
.power_on_shaper_3dlut = mpc42_power_on_rmcm_shaper_3dlut,
.populate_lut = mpc42_populate_rmcm_lut,
.fl_3dlut_configure = mpc42_set_fl_config,
diff --git a/drivers/gpu/drm/amd/display/dc/mpc/dcn42/dcn42_mpc.h b/drivers/gpu/drm/amd/display/dc/mpc/dcn42/dcn42_mpc.h
index 9b87fd2be904..a5f7f4f2bb3b 100644
--- a/drivers/gpu/drm/amd/display/dc/mpc/dcn42/dcn42_mpc.h
+++ b/drivers/gpu/drm/amd/display/dc/mpc/dcn42/dcn42_mpc.h
@@ -882,49 +882,7 @@ void dcn42_mpc_construct(struct dcn42_mpc *mpc401,
int num_mpcc,
int num_rmu);
-
-void mpc42_program_shaper_lutb_settings(
- struct mpc *mpc,
- const struct pwl_params *params,
- uint32_t mpcc_id);
-void mpc42_program_shaper_luta_settings(
- struct mpc *mpc,
- const struct pwl_params *params,
- uint32_t mpcc_id);
-void mpc42_configure_shaper_lut(
- struct mpc *mpc,
- bool is_ram_a,
- uint32_t mpcc_id);
-void mpc42_power_on_shaper_3dlut(
- struct mpc *mpc,
- uint32_t mpcc_id,
- bool power_on);
-void mpc42_program_3dlut_size(
- struct mpc *mpc,
- uint32_t width,
- int mpcc_id);
-void mpc42_program_3dlut_fl_bias_scale(
- struct mpc *mpc,
- uint16_t bias,
- uint16_t scale,
- int mpcc_id);
-void mpc42_program_bit_depth(
- struct mpc *mpc,
- uint16_t bit_depth,
- int mpcc_id);
-void mpc42_populate_lut(
- struct mpc *mpc,
- const union mcm_lut_params params,
- bool lut_bank_a,
- int mpcc_id);
-void mpc42_program_lut_read_write_control(
- struct mpc *mpc,
- const enum MCM_LUT_ID id,
- bool lut_bank_a,
- bool enabled,
- int mpcc_id);
-
-bool mpc42_is_config_supported(uint32_t width);
+void mpc42_init_mpcc(struct mpcc *mpcc, int mpcc_inst);
/* RMCM */
void mpc42_program_rmcm_shaper_lut(
@@ -969,12 +927,12 @@ void mpc42_program_rmcm_lut_read_write_control(
int mpcc_id);
void mpc42_program_lut_mode(
struct mpc *mpc,
- const enum MCM_LUT_XABLE xable,
+ bool enable,
bool lut_bank_a,
int mpcc_id);
void mpc42_program_rmcm_3dlut_size(
struct mpc *mpc,
- uint32_t width,
+ const enum dc_cm_lut_size size,
int mpcc_id);
void mpc42_program_rmcm_3dlut_fast_load_bias_scale(
struct mpc *mpc,
@@ -986,8 +944,6 @@ void mpc42_program_rmcm_bit_depth(
uint16_t bit_depth,
int mpcc_id);
-bool mpc42_is_rmcm_config_supported(uint32_t width);
-
void mpc42_set_fl_config(
struct mpc *mpc,
struct mpc_fl_3dlut_config *cfg,
diff --git a/drivers/gpu/drm/amd/display/dc/optc/dcn10/dcn10_optc.h b/drivers/gpu/drm/amd/display/dc/optc/dcn10/dcn10_optc.h
index cf05620fd8f5..138081e6cc97 100644
--- a/drivers/gpu/drm/amd/display/dc/optc/dcn10/dcn10_optc.h
+++ b/drivers/gpu/drm/amd/display/dc/optc/dcn10/dcn10_optc.h
@@ -218,7 +218,7 @@
uint32_t OTG_CRC_SIG_BLUE_CONTROL_MASK; \
uint32_t OTG_CRC_SIG_RED_GREEN_MASK; \
uint32_t OTG_DLPC_CONTROL; \
- uint32_t OTG_DRR_CONTROL2; \
+ uint32_t OTG_DRR_CONTOL2; \
uint32_t OTG_DRR_TIMING_INT_STATUS; \
uint32_t OTG_GLOBAL_CONTROL3; \
uint32_t OTG_GLOBAL_SYNC_STATUS; \
@@ -676,6 +676,10 @@ struct dcn_optc_registers {
type OTG_V_COUNT_STOP_TIMER;
#define TG_REG_FIELD_LIST_DCN3_6(type) \
+ type OPTC_RSMU_UNDERFLOW_CLEAR;\
+ type OPTC_RSMU_UNDERFLOW_OCCURRED_STATUS;\
+ type OPTC_RSMU_UNDERFLOW_INT_EN;\
+ type OPTC_RSMU_UNDERFLOW_INT_STATUS;\
type OTG_CRC_POLY_SEL; \
type CRC0_R_CR32; \
type CRC0_G_Y32; \
@@ -703,7 +707,7 @@ struct dcn_optc_shift {
TG_REG_FIELD_LIST_DCN3_5(uint8_t)
TG_REG_FIELD_LIST_DCN3_6(uint8_t)
TG_REG_FIELD_LIST_DCN401(uint8_t)
- TG_REG_FIELD_LIST_DCN42(uint8_t)
+ TG_REG_FIELD_LIST_DCN42(uint8_t)
};
struct dcn_optc_mask {
diff --git a/drivers/gpu/drm/amd/display/dc/optc/dcn31/dcn31_optc.c b/drivers/gpu/drm/amd/display/dc/optc/dcn31/dcn31_optc.c
index c6417538090f..893d2aff1f82 100644
--- a/drivers/gpu/drm/amd/display/dc/optc/dcn31/dcn31_optc.c
+++ b/drivers/gpu/drm/amd/display/dc/optc/dcn31/dcn31_optc.c
@@ -363,7 +363,7 @@ void optc31_read_reg_state(struct timing_generator *optc, struct dcn_optc_reg_st
optc_reg_state->otg_crc3_data_rg = REG_READ(OTG_CRC3_DATA_RG);
optc_reg_state->otg_dlpc_control = REG_READ(OTG_DLPC_CONTROL);
optc_reg_state->otg_double_buffer_control = REG_READ(OTG_DOUBLE_BUFFER_CONTROL);
- optc_reg_state->otg_drr_control2 = REG_READ(OTG_DRR_CONTROL2);
+ optc_reg_state->otg_drr_control2 = REG_READ(OTG_DRR_CONTOL2);
optc_reg_state->otg_drr_control = REG_READ(OTG_DRR_CONTROL);
optc_reg_state->otg_drr_timing_int_status = REG_READ(OTG_DRR_TIMING_INT_STATUS);
optc_reg_state->otg_drr_trigger_window = REG_READ(OTG_DRR_TRIGGER_WINDOW);
diff --git a/drivers/gpu/drm/amd/display/dc/optc/dcn42/dcn42_optc.c b/drivers/gpu/drm/amd/display/dc/optc/dcn42/dcn42_optc.c
index effd05b3685f..ed66a2bbb8ae 100644
--- a/drivers/gpu/drm/amd/display/dc/optc/dcn42/dcn42_optc.c
+++ b/drivers/gpu/drm/amd/display/dc/optc/dcn42/dcn42_optc.c
@@ -6,11 +6,13 @@
#include "dcn30/dcn30_optc.h"
#include "dcn31/dcn31_optc.h"
#include "dcn32/dcn32_optc.h"
+#include "dcn35/dcn35_optc.h"
#include "dcn401/dcn401_optc.h"
#include "reg_helper.h"
#include "dc.h"
#include "dcn_calc_math.h"
#include "dc_dmub_srv.h"
+#include "dc_trace.h"
#define REG(reg)\
optc1->tg_regs->reg
@@ -108,6 +110,89 @@ void optc42_disable_pwa(struct timing_generator *optc)
REG_UPDATE(OTG_PWA_FRAME_SYNC_CONTROL,
OTG_PWA_FRAME_SYNC_EN, 0);
}
+void optc42_clear_optc_underflow(struct timing_generator *optc)
+{
+ struct optc *optc1 = DCN10TG_FROM_TG(optc);
+
+ REG_UPDATE(OPTC_INPUT_GLOBAL_CONTROL, OPTC_UNDERFLOW_CLEAR, 1);
+ REG_UPDATE(OPTC_RSMU_UNDERFLOW, OPTC_RSMU_UNDERFLOW_CLEAR, 1);
+}
+bool optc42_is_optc_underflow_occurred(struct timing_generator *optc)
+{
+ struct optc *optc1 = DCN10TG_FROM_TG(optc);
+ uint32_t underflow_occurred = 0, rsmu_underflow_occurred = 0;
+
+ REG_GET(OPTC_INPUT_GLOBAL_CONTROL,
+ OPTC_UNDERFLOW_OCCURRED_STATUS,
+ &underflow_occurred);
+
+ REG_GET(OPTC_RSMU_UNDERFLOW,
+ OPTC_RSMU_UNDERFLOW_OCCURRED_STATUS,
+ &rsmu_underflow_occurred);
+ return (underflow_occurred == 1 || rsmu_underflow_occurred);
+}
+/* disable_crtc */
+bool optc42_disable_crtc(struct timing_generator *optc)
+{
+ optc401_disable_crtc(optc);
+ optc42_clear_optc_underflow(optc);
+
+ return true;
+}
+static void optc42_set_timing_double_buffer(struct timing_generator *optc, bool enable)
+{
+ struct optc *optc1 = DCN10TG_FROM_TG(optc);
+ uint32_t mode = enable ? 2 : 0;
+ /* actually we have 4 modes now, use as the same as previous dcn3x
+ * 00 OTG_DOUBLE_BUFFER_CONTROL_OTG_DRR_TIMING_DBUF_UPDATE_MODE_0 Double buffer update occurs at any time in a frame.
+ * 01 OTG_DOUBLE_BUFFER_CONTROL_OTG_DRR_TIMING_DBUF_UPDATE_MODE_1 Double buffer update occurs at OTG start of frame.
+ * 02 OTG_DOUBLE_BUFFER_CONTROL_OTG_DRR_TIMING_DBUF_UPDATE_MODE_2 Double buffer occurs DP start of frame.
+ * 03 OTG_DOUBLE_BUFFER_CONTROL_OTG_DRR_TIMING_DBUF_UPDATE_MODE_3 Reserved.
+ */
+
+ REG_UPDATE(OTG_DOUBLE_BUFFER_CONTROL,
+ OTG_DRR_TIMING_DBUF_UPDATE_MODE, mode);
+}
+void optc42_tg_init(struct timing_generator *optc)
+{
+ optc42_set_timing_double_buffer(optc, true);
+ optc42_clear_optc_underflow(optc);
+}
+
+void optc42_lock_doublebuffer_enable(struct timing_generator *optc)
+{
+ struct optc *optc1 = DCN10TG_FROM_TG(optc);
+ uint32_t v_blank_start = 0;
+ uint32_t v_blank_end = 0;
+ uint32_t h_blank_start = 0;
+ uint32_t h_blank_end = 0;
+
+ REG_GET_2(OTG_V_BLANK_START_END,
+ OTG_V_BLANK_START, &v_blank_start,
+ OTG_V_BLANK_END, &v_blank_end);
+ REG_GET_2(OTG_H_BLANK_START_END,
+ OTG_H_BLANK_START, &h_blank_start,
+ OTG_H_BLANK_END, &h_blank_end);
+
+ REG_UPDATE_2(OTG_GLOBAL_CONTROL1,
+ MASTER_UPDATE_LOCK_DB_START_Y, v_blank_start,
+ MASTER_UPDATE_LOCK_DB_END_Y, v_blank_start);
+ REG_UPDATE_2(OTG_GLOBAL_CONTROL4,
+ DIG_UPDATE_POSITION_X, 20,
+ DIG_UPDATE_POSITION_Y, v_blank_start);
+ REG_UPDATE_3(OTG_GLOBAL_CONTROL0,
+ MASTER_UPDATE_LOCK_DB_START_X, h_blank_start - 200 - 1,
+ MASTER_UPDATE_LOCK_DB_END_X, h_blank_end,
+ MASTER_UPDATE_LOCK_DB_EN, 1);
+ REG_UPDATE(OTG_GLOBAL_CONTROL2, GLOBAL_UPDATE_LOCK_EN, 1);
+
+ REG_SET_3(OTG_VUPDATE_KEEPOUT, 0,
+ MASTER_UPDATE_LOCK_VUPDATE_KEEPOUT_START_OFFSET, 0,
+ MASTER_UPDATE_LOCK_VUPDATE_KEEPOUT_END_OFFSET, 100,
+ OTG_MASTER_UPDATE_LOCK_VUPDATE_KEEPOUT_EN, 1);
+
+ TRACE_OPTC_LOCK_UNLOCK_STATE(optc1, optc->inst, true);
+}
static struct timing_generator_funcs dcn42_tg_funcs = {
.validate_timing = optc1_validate_timing,
@@ -117,7 +202,7 @@ static struct timing_generator_funcs dcn42_tg_funcs = {
.setup_vertical_interrupt2 = optc1_setup_vertical_interrupt2,
.program_global_sync = optc401_program_global_sync,
.enable_crtc = optc401_enable_crtc,
- .disable_crtc = optc401_disable_crtc,
+ .disable_crtc = optc42_disable_crtc,
.phantom_crtc_post_enable = optc401_phantom_crtc_post_enable,
.disable_phantom_crtc = optc401_disable_phantom_otg,
/* used by enable_timing_synchronization. Not need for FPGA */
@@ -138,7 +223,7 @@ static struct timing_generator_funcs dcn42_tg_funcs = {
.disable_reset_trigger = optc1_disable_reset_trigger,
.lock = optc3_lock,
.unlock = optc1_unlock,
- .lock_doublebuffer_enable = optc3_lock_doublebuffer_enable,
+ .lock_doublebuffer_enable = optc42_lock_doublebuffer_enable,
.lock_doublebuffer_disable = optc3_lock_doublebuffer_disable,
.enable_optc_clock = optc1_enable_optc_clock,
.set_drr = optc401_set_drr,
@@ -147,13 +232,13 @@ static struct timing_generator_funcs dcn42_tg_funcs = {
.set_static_screen_control = optc1_set_static_screen_control,
.program_stereo = optc1_program_stereo,
.is_stereo_left_eye = optc1_is_stereo_left_eye,
- .tg_init = optc3_tg_init,
+ .tg_init = optc42_tg_init,
.is_tg_enabled = optc1_is_tg_enabled,
- .is_optc_underflow_occurred = optc1_is_optc_underflow_occurred,
- .clear_optc_underflow = optc1_clear_optc_underflow,
+ .is_optc_underflow_occurred = optc42_is_optc_underflow_occurred,
+ .clear_optc_underflow = optc42_clear_optc_underflow,
.setup_global_swap_lock = NULL,
.get_crc = optc42_get_crc,
- .configure_crc = optc1_configure_crc,
+ .configure_crc = optc35_configure_crc,
.set_dsc_config = optc3_set_dsc_config,
.get_dsc_status = optc2_get_dsc_status,
.set_dwb_source = NULL,
@@ -162,6 +247,7 @@ static struct timing_generator_funcs dcn42_tg_funcs = {
.wait_odm_doublebuffer_pending_clear = optc32_wait_odm_doublebuffer_pending_clear,
.set_h_timing_div_manual_mode = optc401_set_h_timing_div_manual_mode,
.get_optc_source = optc2_get_optc_source,
+ .wait_otg_disable = optc35_wait_otg_disable,
.set_out_mux = optc401_set_out_mux,
.set_drr_trigger_window = optc3_set_drr_trigger_window,
.set_vtotal_change_limit = optc3_set_vtotal_change_limit,
@@ -171,6 +257,8 @@ static struct timing_generator_funcs dcn42_tg_funcs = {
.program_manual_trigger = optc2_program_manual_trigger,
.setup_manual_trigger = optc2_setup_manual_trigger,
.get_hw_timing = optc1_get_hw_timing,
+ .init_odm = optc3_init_odm,
+ .set_long_vtotal = optc35_set_long_vtotal,
.is_two_pixels_per_container = optc1_is_two_pixels_per_container,
.get_optc_double_buffer_pending = optc3_get_optc_double_buffer_pending,
.get_otg_double_buffer_pending = optc3_get_otg_update_pending,
@@ -178,6 +266,7 @@ static struct timing_generator_funcs dcn42_tg_funcs = {
.set_vupdate_keepout = optc401_set_vupdate_keepout,
.wait_update_lock_status = optc401_wait_update_lock_status,
.optc_read_reg_state = optc31_read_reg_state,
+ .read_otg_state = optc31_read_otg_state,
.enable_otg_pwa = optc42_enable_pwa,
.disable_otg_pwa = optc42_disable_pwa,
};
@@ -194,5 +283,9 @@ void dcn42_timing_generator_init(struct optc *optc1)
optc1->min_v_blank_interlace = 5;
optc1->min_h_sync_width = 4;
optc1->min_v_sync_width = 1;
+ optc1->max_frame_count = 0xFFFFFF;
+
+ dcn35_timing_generator_set_fgcg(
+ optc1, CTX->dc->debug.enable_fine_grain_clock_gating.bits.optc);
}
diff --git a/drivers/gpu/drm/amd/display/dc/optc/dcn42/dcn42_optc.h b/drivers/gpu/drm/amd/display/dc/optc/dcn42/dcn42_optc.h
index 45d2187efaca..fc7192f01b33 100644
--- a/drivers/gpu/drm/amd/display/dc/optc/dcn42/dcn42_optc.h
+++ b/drivers/gpu/drm/amd/display/dc/optc/dcn42/dcn42_optc.h
@@ -119,6 +119,10 @@
SF(ODM0_OPTC_INPUT_GLOBAL_CONTROL, OPTC_UNDERFLOW_OCCURRED_STATUS, mask_sh),\
SF(ODM0_OPTC_INPUT_GLOBAL_CONTROL, OPTC_DOUBLE_BUFFER_PENDING, mask_sh),\
SF(ODM0_OPTC_INPUT_GLOBAL_CONTROL, OPTC_UNDERFLOW_CLEAR, mask_sh),\
+ SF(ODM0_OPTC_RSMU_UNDERFLOW, OPTC_RSMU_UNDERFLOW_OCCURRED_STATUS, mask_sh),\
+ SF(ODM0_OPTC_RSMU_UNDERFLOW, OPTC_RSMU_UNDERFLOW_CLEAR, mask_sh),\
+ SF(ODM0_OPTC_RSMU_UNDERFLOW, OPTC_RSMU_UNDERFLOW_INT_EN, mask_sh),\
+ SF(ODM0_OPTC_RSMU_UNDERFLOW, OPTC_RSMU_UNDERFLOW_INT_STATUS, mask_sh),\
SF(VTG0_CONTROL, VTG0_ENABLE, mask_sh),\
SF(VTG0_CONTROL, VTG0_FP2, mask_sh),\
SF(VTG0_CONTROL, VTG0_VCOUNT_INIT, mask_sh),\
@@ -202,10 +206,15 @@
SF(OTG0_OTG_PWA_FRAME_SYNC_CONTROL, OTG_PWA_FRAME_SYNC_EN, mask_sh),\
SF(OTG0_OTG_PWA_FRAME_SYNC_CONTROL, OTG_PWA_FRAME_SYNC_VCOUNT_MODE, mask_sh),\
SF(OTG0_OTG_PWA_FRAME_SYNC_CONTROL, OTG_PWA_FRAME_SYNC_LINE, mask_sh),\
- SF(OTG0_INTERRUPT_DEST, OTG0_IHC_OTG_VERTICAL_INTERRUPT2_DEST, mask_sh)
+ SF(OTG0_INTERRUPT_DEST, OTG0_IHC_OTG_VERTICAL_INTERRUPT2_DEST, mask_sh),\
+ SF(OPTC_CLOCK_CONTROL, OPTC_FGCG_REP_DIS, mask_sh)
void dcn42_timing_generator_init(struct optc *optc1);
void optc42_enable_pwa(struct timing_generator *optc, struct otc_pwa_frame_sync *pwa_sync_param);
void optc42_disable_pwa(struct timing_generator *optc);
-
+void optc42_tg_init(struct timing_generator *optc);
+void optc42_clear_optc_underflow(struct timing_generator *optc);
+bool optc42_is_optc_underflow_occurred(struct timing_generator *optc);
+bool optc42_disable_crtc(struct timing_generator *optc);
+void optc42_lock_doublebuffer_enable(struct timing_generator *optc);
#endif /* __DC_OPTC_DCN42_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn20/dcn20_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn20/dcn20_resource.c
index 8b555187ac75..366576b1c617 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn20/dcn20_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn20/dcn20_resource.c
@@ -2341,8 +2341,6 @@ static bool init_soc_bounding_box(struct dc *dc,
struct _vcs_dpi_ip_params_st *loaded_ip =
get_asic_rev_ip_params(dc->ctx->asic_id.hw_internal_rev);
- DC_LOGGER_INIT(dc->ctx->logger);
-
if (pool->base.pp_smu) {
struct pp_smu_nv_clock_table max_clocks = {0};
unsigned int uclk_states[8] = {0};
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn42/dcn42_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn42/dcn42_resource.c
index 8e41367cf238..aef187bcf5c3 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn42/dcn42_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn42/dcn42_resource.c
@@ -53,7 +53,7 @@
#include "dce/dce_audio.h"
#include "dce/dce_hwseq.h"
#include "clk_mgr.h"
-#include "virtual/virtual_stream_encoder.h"
+#include "dio/virtual/virtual_stream_encoder.h"
#include "dml/display_mode_vba.h"
#include "dcn42/dcn42_dccg.h"
#include "dcn10/dcn10_resource.h"
@@ -666,6 +666,7 @@ static const struct resource_caps res_cap_dcn42 = {
.num_vmid = 16,
.num_mpc_3dlut = 2,
.num_dsc = 4,
+ .num_rmcm = 2,
};
static const struct dc_plane_cap plane_cap = {
@@ -755,6 +756,7 @@ static const struct dc_debug_options debug_defaults_drv = {
.dcc_meta_propagation_delay_us = 10,
.disable_timeout = true,
.min_disp_clk_khz = 50000,
+ .static_screen_wait_frames = 2,
.disable_z10 = false,
.ignore_pg = true,
.disable_stutter_for_wm_program = true,
@@ -2302,14 +2304,12 @@ static bool dcn42_resource_construct(
dc->dml2_options.max_segments_per_hubp = 24;
dc->dml2_options.det_segment_size = DCN42_CRB_SEGMENT_SIZE_KB;
+ dc->dml2_options.gpuvm_enable = true;
+ dc->dml2_options.hostvm_enable = true;
/* SPL */
dc->caps.scl_caps.sharpener_support = true;
- /* init DC limited DML2 options */
- memcpy(&dc->dml2_dc_power_options, &dc->dml2_options, sizeof(struct dml2_configuration_options));
- dc->dml2_dc_power_options.use_clock_dc_limits = true;
-
return true;
create_fail:
diff --git a/drivers/gpu/drm/amd/display/dc/soc_and_ip_translator/Makefile b/drivers/gpu/drm/amd/display/dc/soc_and_ip_translator/Makefile
index bc93356a0b5b..d168fb1eacf7 100644
--- a/drivers/gpu/drm/amd/display/dc/soc_and_ip_translator/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/soc_and_ip_translator/Makefile
@@ -8,11 +8,14 @@ soc_and_ip_translator_ccflags := $(CC_FLAGS_FPU)
soc_and_ip_translator_rcflags := $(CC_FLAGS_NO_FPU)
CFLAGS_$(AMDDALPATH)/dc/soc_and_ip_translator/dcn401/dcn401_soc_and_ip_translator.o := $(soc_and_ip_translator_ccflags)
+CFLAGS_$(AMDDALPATH)/dc/soc_and_ip_translator/dcn42/dcn42_soc_and_ip_translator.o := $(soc_and_ip_translator_ccflags)
CFLAGS_REMOVE_$(AMDDALPATH)/dc/soc_and_ip_translator/dcn401/dcn401_soc_and_ip_translator.o := $(soc_and_ip_translator_rcflags)
+CFLAGS_REMOVE_$(AMDDALPATH)/dc/soc_and_ip_translator/dcn42/dcn42_soc_and_ip_translator.o := $(soc_and_ip_translator_rcflags)
soc_and_ip_translator := soc_and_ip_translator.o
soc_and_ip_translator += dcn401/dcn401_soc_and_ip_translator.o
+soc_and_ip_translator += dcn42/dcn42_soc_and_ip_translator.o
AMD_DAL_soc_and_ip_translator := $(addprefix $(AMDDALPATH)/dc/soc_and_ip_translator/, $(soc_and_ip_translator))
diff --git a/drivers/gpu/drm/amd/display/dc/soc_and_ip_translator/dcn401/dcn401_soc_and_ip_translator.c b/drivers/gpu/drm/amd/display/dc/soc_and_ip_translator/dcn401/dcn401_soc_and_ip_translator.c
index 3190c76eb482..1b397fa7e05c 100644
--- a/drivers/gpu/drm/amd/display/dc/soc_and_ip_translator/dcn401/dcn401_soc_and_ip_translator.c
+++ b/drivers/gpu/drm/amd/display/dc/soc_and_ip_translator/dcn401/dcn401_soc_and_ip_translator.c
@@ -102,6 +102,9 @@ static void dcn401_convert_dc_clock_table_to_soc_bb_clock_table(
}
} else {
dml_clk_table->uclk.clk_values_khz[i] = dc_clk_table->entries[i].memclk_mhz * 1000;
+ #ifdef ENABLE_WCK
+ dml_clk_table->wck_ratio.clk_values_khz[i] = dc_clk_table->entries[i].wck_ratio;
+ #endif
}
} else {
dml_clk_table->uclk.clk_values_khz[i] = 0;
diff --git a/drivers/gpu/drm/amd/display/dc/soc_and_ip_translator/dcn42/dcn42_soc_and_ip_translator.c b/drivers/gpu/drm/amd/display/dc/soc_and_ip_translator/dcn42/dcn42_soc_and_ip_translator.c
index c9e224d262c9..fd9c24b5df53 100644
--- a/drivers/gpu/drm/amd/display/dc/soc_and_ip_translator/dcn42/dcn42_soc_and_ip_translator.c
+++ b/drivers/gpu/drm/amd/display/dc/soc_and_ip_translator/dcn42/dcn42_soc_and_ip_translator.c
@@ -3,7 +3,7 @@
// Copyright 2025 Advanced Micro Devices, Inc.
#include "dcn42_soc_and_ip_translator.h"
-#include "soc_and_ip_translator/dcn401/dcn401_soc_and_ip_translator.h"
+#include "../dcn401/dcn401_soc_and_ip_translator.h"
#include "bounding_boxes/dcn42_soc_bb.h"
/* soc_and_ip_translator component used to get up-to-date values for bounding box.
@@ -11,13 +11,171 @@
* This component provides an interface to get DCN-specific bounding box values.
*/
+static void get_default_soc_bb(struct dml2_soc_bb *soc_bb, const struct dc *dc)
+{
+ {
+ memcpy(soc_bb, &dml2_socbb_dcn42, sizeof(struct dml2_soc_bb));
+ memcpy(&soc_bb->qos_parameters, &dml_dcn42_variant_a_soc_qos_params, sizeof(struct dml2_soc_qos_parameters));
+ }
+}
+
+/*
+ * DC clock table is obtained from SMU during runtime.
+ * SMU stands for System Management Unit. It is a power management processor.
+ * It owns the initialization of dc's clock table and programming of clock values
+ * based on dc's requests.
+ * Our clock values in base soc bb is a dummy placeholder. The real clock values
+ * are retrieved from SMU firmware to dc clock table at runtime.
+ * This function overrides our dummy placeholder values with real values in dc
+ * clock table.
+ */
+static void dcn42_convert_dc_clock_table_to_soc_bb_clock_table(
+ struct dml2_soc_state_table *dml_clk_table,
+ struct dml2_soc_vmin_clock_limits *vmin_limit,
+ const struct clk_bw_params *dc_bw_params)
+{
+ int i;
+ const struct clk_limit_table *dc_clk_table;
+
+ if (dc_bw_params == NULL)
+ /* skip if bw params could not be obtained from smu */
+ return;
+
+ dc_clk_table = &dc_bw_params->clk_table;
+
+ /* fclk/dcfclk - dcn42 pmfw table can have 0 entries for inactive dpm levels
+ * for use with dml we need to fill in using an active value aiming for >= 2x DCFCLK
+ */
+ if (dc_clk_table->num_entries_per_clk.num_fclk_levels && dc_clk_table->num_entries_per_clk.num_dcfclk_levels) {
+ dml_clk_table->fclk.num_clk_values = dc_clk_table->num_entries_per_clk.num_dcfclk_levels;
+ dml_clk_table->dcfclk.num_clk_values = dc_clk_table->num_entries_per_clk.num_dcfclk_levels;
+ for (i = 0; i < min(DML_MAX_CLK_TABLE_SIZE, MAX_NUM_DPM_LVL); i++) {
+ if (i < dc_clk_table->num_entries_per_clk.num_dcfclk_levels) {
+ int j, max_fclk = 0;
+
+ dml_clk_table->dcfclk.clk_values_khz[i] = dc_clk_table->entries[i].dcfclk_mhz * 1000;
+ for (j = 0; j < MAX_NUM_DPM_LVL; j++) {
+ if (dc_clk_table->entries[j].fclk_mhz * 1000 > max_fclk)
+ max_fclk = dc_clk_table->entries[j].fclk_mhz * 1000;
+ dml_clk_table->fclk.clk_values_khz[i] = max_fclk;
+ if (max_fclk >= 2 * dml_clk_table->dcfclk.clk_values_khz[i])
+ break;
+ }
+ } else {
+ dml_clk_table->dcfclk.clk_values_khz[i] = 0;
+ dml_clk_table->fclk.clk_values_khz[i] = 0;
+ }
+ }
+ }
+
+ /* uclk */
+ if (dc_clk_table->num_entries_per_clk.num_memclk_levels) {
+ dml_clk_table->uclk.num_clk_values = dc_clk_table->num_entries_per_clk.num_memclk_levels;
+ for (i = 0; i < min(DML_MAX_CLK_TABLE_SIZE, MAX_NUM_DPM_LVL); i++) {
+ if (i < dml_clk_table->uclk.num_clk_values) {
+ dml_clk_table->uclk.clk_values_khz[i] = dc_clk_table->entries[i].memclk_mhz * 1000;
+ dml_clk_table->wck_ratio.clk_values_khz[i] = dc_clk_table->entries[i].wck_ratio;
+ } else {
+ dml_clk_table->uclk.clk_values_khz[i] = 0;
+ dml_clk_table->wck_ratio.clk_values_khz[i] = 0;
+ }
+ }
+ }
+
+ /* dispclk */
+ if (dc_clk_table->num_entries_per_clk.num_dispclk_levels) {
+ dml_clk_table->dispclk.num_clk_values = dc_clk_table->num_entries_per_clk.num_dispclk_levels;
+ for (i = 0; i < min(DML_MAX_CLK_TABLE_SIZE, MAX_NUM_DPM_LVL); i++) {
+ if (i < dml_clk_table->dispclk.num_clk_values) {
+ dml_clk_table->dispclk.clk_values_khz[i] = dc_clk_table->entries[i].dispclk_mhz * 1000;
+ } else {
+ dml_clk_table->dispclk.clk_values_khz[i] = 0;
+ }
+ }
+ vmin_limit->dispclk_khz = min(dc_clk_table->entries[0].dispclk_mhz * 1000, vmin_limit->dispclk_khz);
+ }
+
+ /* dppclk */
+ if (dc_clk_table->num_entries_per_clk.num_dppclk_levels) {
+ dml_clk_table->dppclk.num_clk_values = dc_clk_table->num_entries_per_clk.num_dppclk_levels;
+ for (i = 0; i < min(DML_MAX_CLK_TABLE_SIZE, MAX_NUM_DPM_LVL); i++) {
+ if (i < dml_clk_table->dppclk.num_clk_values) {
+ dml_clk_table->dppclk.clk_values_khz[i] = dc_clk_table->entries[i].dppclk_mhz * 1000;
+ } else {
+ dml_clk_table->dppclk.clk_values_khz[i] = 0;
+ }
+ }
+ }
+
+ /* dtbclk */
+ if (dc_clk_table->num_entries_per_clk.num_dtbclk_levels) {
+ dml_clk_table->dtbclk.num_clk_values = dc_clk_table->num_entries_per_clk.num_dtbclk_levels;
+ for (i = 0; i < min(DML_MAX_CLK_TABLE_SIZE, MAX_NUM_DPM_LVL); i++) {
+ if (i < dml_clk_table->dtbclk.num_clk_values) {
+ dml_clk_table->dtbclk.clk_values_khz[i] = dc_clk_table->entries[i].dtbclk_mhz * 1000;
+ } else {
+ dml_clk_table->dtbclk.clk_values_khz[i] = 0;
+ }
+ }
+ }
+
+ /* socclk */
+ if (dc_clk_table->num_entries_per_clk.num_socclk_levels) {
+ dml_clk_table->socclk.num_clk_values = dc_clk_table->num_entries_per_clk.num_socclk_levels;
+ for (i = 0; i < min(DML_MAX_CLK_TABLE_SIZE, MAX_NUM_DPM_LVL); i++) {
+ if (i < dml_clk_table->socclk.num_clk_values) {
+ dml_clk_table->socclk.clk_values_khz[i] = dc_clk_table->entries[i].socclk_mhz * 1000;
+ } else {
+ dml_clk_table->socclk.clk_values_khz[i] = 0;
+ }
+ }
+ }
+
+ /* dram config */
+ dml_clk_table->dram_config.channel_count = dc_bw_params->num_channels;
+ dml_clk_table->dram_config.channel_width_bytes = dc_bw_params->dram_channel_width_bytes;
+}
+
+static void dcn42_update_soc_bb_with_values_from_clk_mgr(struct dml2_soc_bb *soc_bb, const struct dc *dc)
+{
+ soc_bb->dprefclk_mhz = dc->clk_mgr->dprefclk_khz / 1000;
+ soc_bb->dispclk_dppclk_vco_speed_mhz = dc->clk_mgr->dentist_vco_freq_khz / 1000.0;
+ soc_bb->mall_allocated_for_dcn_mbytes = dc->caps.mall_size_total / (1024 * 1024);
+
+ if (dc->clk_mgr->funcs->is_smu_present &&
+ dc->clk_mgr->funcs->is_smu_present(dc->clk_mgr)) {
+ dcn42_convert_dc_clock_table_to_soc_bb_clock_table(&soc_bb->clk_table, &soc_bb->vmin_limit,
+ dc->clk_mgr->bw_params);
+ }
+}
+
+static void apply_soc_bb_updates(struct dml2_soc_bb *soc_bb, const struct dc *dc, const struct dml2_configuration_options *config)
+{
+ /* Individual modification can be overwritten even if it was obtained by a previous function.
+ * Modifications are acquired in order of priority (lowest to highest).
+ */
+ dc_assert_fp_enabled();
+
+ dcn42_update_soc_bb_with_values_from_clk_mgr(soc_bb, dc);
+ dcn401_update_soc_bb_with_values_from_vbios(soc_bb, dc);
+ dcn401_update_soc_bb_with_values_from_software_policy(soc_bb, dc);
+}
+
+void dcn42_get_soc_bb(struct dml2_soc_bb *soc_bb, const struct dc *dc, const struct dml2_configuration_options *config)
+{
+ //get default soc_bb with static values
+ get_default_soc_bb(soc_bb, dc);
+ //update soc_bb values with more accurate values
+ apply_soc_bb_updates(soc_bb, dc, config);
+}
+
static void dcn42_get_ip_caps(struct dml2_ip_capabilities *ip_caps)
{
*ip_caps = dml2_dcn42_max_ip_caps;
}
static struct soc_and_ip_translator_funcs dcn42_translator_funcs = {
- .get_soc_bb = dcn401_get_soc_bb,
+ .get_soc_bb = dcn42_get_soc_bb,
.get_ip_caps = dcn42_get_ip_caps,
};
@@ -25,3 +183,4 @@ void dcn42_construct_soc_and_ip_translator(struct soc_and_ip_translator *soc_and
{
soc_and_ip_translator->translator_funcs = &dcn42_translator_funcs;
}
+
diff --git a/drivers/gpu/drm/amd/display/dc/soc_and_ip_translator/dcn42/dcn42_soc_and_ip_translator.h b/drivers/gpu/drm/amd/display/dc/soc_and_ip_translator/dcn42/dcn42_soc_and_ip_translator.h
index 914dcbb369a7..1dded5426152 100644
--- a/drivers/gpu/drm/amd/display/dc/soc_and_ip_translator/dcn42/dcn42_soc_and_ip_translator.h
+++ b/drivers/gpu/drm/amd/display/dc/soc_and_ip_translator/dcn42/dcn42_soc_and_ip_translator.h
@@ -12,5 +12,6 @@
#include "soc_and_ip_translator.h"
void dcn42_construct_soc_and_ip_translator(struct soc_and_ip_translator *soc_and_ip_translator);
+void dcn42_get_soc_bb(struct dml2_soc_bb *soc_bb, const struct dc *dc, const struct dml2_configuration_options *config);
#endif /* _DCN42_SOC_AND_IP_TRANSLATOR_H_ */
diff --git a/drivers/gpu/drm/amd/display/dc/soc_and_ip_translator/soc_and_ip_translator.c b/drivers/gpu/drm/amd/display/dc/soc_and_ip_translator/soc_and_ip_translator.c
index 6617c9d2d5f8..bad0bd79fa94 100644
--- a/drivers/gpu/drm/amd/display/dc/soc_and_ip_translator/soc_and_ip_translator.c
+++ b/drivers/gpu/drm/amd/display/dc/soc_and_ip_translator/soc_and_ip_translator.c
@@ -4,6 +4,7 @@
#include "soc_and_ip_translator.h"
#include "soc_and_ip_translator/dcn401/dcn401_soc_and_ip_translator.h"
+#include "soc_and_ip_translator/dcn42/dcn42_soc_and_ip_translator.h"
static void dc_construct_soc_and_ip_translator(struct soc_and_ip_translator *soc_and_ip_translator,
enum dce_version dc_version)
@@ -12,6 +13,9 @@ static void dc_construct_soc_and_ip_translator(struct soc_and_ip_translator *soc
case DCN_VERSION_4_01:
dcn401_construct_soc_and_ip_translator(soc_and_ip_translator);
break;
+ case DCN_VERSION_4_2:
+ dcn42_construct_soc_and_ip_translator(soc_and_ip_translator);
+ break;
default:
break;
}
diff --git a/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h b/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h
index 2abbc6c97850..e11e32afac6b 100644
--- a/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h
+++ b/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h
@@ -909,7 +909,8 @@ union dmub_fw_meta_feature_bits {
struct {
uint32_t shared_state_link_detection : 1; /**< 1 supports link detection via shared state */
uint32_t cursor_offload_v1_support: 1; /**< 1 supports cursor offload */
- uint32_t reserved : 30;
+ uint32_t inbox0_lock_support: 1; /**< 1 supports inbox0 lock mechanism */
+ uint32_t reserved : 29;
} bits; /**< status bits */
uint32_t all; /**< 32-bit access to status bits */
};
@@ -1535,14 +1536,12 @@ enum dmub_gpint_command {
* 1 - Enable ips measurement
*/
DMUB_GPINT__IPS_RESIDENCY = 121,
-
/**
* DESC: Enable measurements for various task duration
* ARGS: 0 - Disable measurement
* 1 - Enable measurement
*/
DMUB_GPINT__TRACE_DMUB_WAKE_ACTIVITY = 123,
-
/**
* DESC: Gets IPS residency in microseconds
* ARGS: 0 - Return IPS1 residency
@@ -1552,21 +1551,18 @@ enum dmub_gpint_command {
* RETURN: Total residency in microseconds - lower 32 bits
*/
DMUB_GPINT__GET_IPS_RESIDENCY_DURATION_US_LO = 124,
-
/**
* DESC: Gets IPS1 histogram counts
* ARGS: Bucket index
* RETURN: Total count for the bucket
*/
DMUB_GPINT__GET_IPS1_HISTOGRAM_COUNTER = 125,
-
/**
* DESC: Gets IPS2 histogram counts
* ARGS: Bucket index
* RETURN: Total count for the bucket
*/
DMUB_GPINT__GET_IPS2_HISTOGRAM_COUNTER = 126,
-
/**
* DESC: Gets IPS residency
* ARGS: 0 - Return IPS1 residency
@@ -1576,21 +1572,18 @@ enum dmub_gpint_command {
* RETURN: Total residency in milli-percent.
*/
DMUB_GPINT__GET_IPS_RESIDENCY_PERCENT = 127,
-
/**
* DESC: Gets IPS1_RCG histogram counts
* ARGS: Bucket index
* RETURN: Total count for the bucket
*/
DMUB_GPINT__GET_IPS1_RCG_HISTOGRAM_COUNTER = 128,
-
/**
* DESC: Gets IPS1_ONO2_ON histogram counts
* ARGS: Bucket index
* RETURN: Total count for the bucket
*/
DMUB_GPINT__GET_IPS1_ONO2_ON_HISTOGRAM_COUNTER = 129,
-
/**
* DESC: Gets IPS entry counter during residency measurement
* ARGS: 0 - Return IPS1 entry counts
@@ -1600,7 +1593,6 @@ enum dmub_gpint_command {
* RETURN: Entry counter for selected IPS mode
*/
DMUB_GPINT__GET_IPS_RESIDENCY_ENTRY_COUNTER = 130,
-
/**
* DESC: Gets IPS inactive residency in microseconds
* ARGS: 0 - Return IPS1_MAX residency
@@ -1610,7 +1602,6 @@ enum dmub_gpint_command {
* RETURN: Total inactive residency in microseconds - lower 32 bits
*/
DMUB_GPINT__GET_IPS_INACTIVE_RESIDENCY_DURATION_US_LO = 131,
-
/**
* DESC: Gets IPS inactive residency in microseconds
* ARGS: 0 - Return IPS1_MAX residency
@@ -1620,7 +1611,6 @@ enum dmub_gpint_command {
* RETURN: Total inactive residency in microseconds - upper 32 bits
*/
DMUB_GPINT__GET_IPS_INACTIVE_RESIDENCY_DURATION_US_HI = 132,
-
/**
* DESC: Gets IPS residency in microseconds
* ARGS: 0 - Return IPS1 residency
@@ -1679,7 +1669,7 @@ union dmub_inbox0_cmd_lock_hw {
uint32_t lock: 1; /**< Lock */
uint32_t should_release: 1; /**< Release */
- uint32_t reserved: 7; /**< Reserved for extending more clients, HW, etc. */
+ uint32_t reserved: 7; /**< Reserved for extending more clients, HW, etc. */
} bits;
uint32_t all;
};
@@ -1903,6 +1893,11 @@ enum dmub_cmd_type {
DMUB_CMD__IHC = 95,
/**
+	 * Command type used for boot time crc commands.
+ */
+ DMUB_CMD__BOOT_TIME_CRC = 96,
+
+ /**
* Command type use for VBIOS shared commands.
*/
DMUB_CMD__VBIOS = 128,
@@ -2614,9 +2609,9 @@ struct dmub_fams2_stream_static_state {
uint8_t allow_to_target_delay_otg_vlines; // time from allow vline to target vline
union {
struct {
- uint8_t is_drr: 1; // stream is DRR enabled
- uint8_t clamp_vtotal_min: 1; // clamp vtotal to min instead of nominal
- uint8_t min_ttu_vblank_usable: 1; // if min ttu vblank is above wm, no force pstate is needed in blank
+ uint8_t is_drr : 1; // stream is DRR enabled
+ uint8_t clamp_vtotal_min : 1; // clamp vtotal to min instead of nominal
+ uint8_t min_ttu_vblank_usable : 1; // if min ttu vblank is above wm, no force pstate is needed in blank
} bits;
uint8_t all;
} config;
@@ -4441,6 +4436,7 @@ enum dmub_cmd_replay_general_subtype {
REPLAY_GENERAL_CMD_SET_LOW_RR_ACTIVATE,
REPLAY_GENERAL_CMD_VIDEO_CONFERENCING,
REPLAY_GENERAL_CMD_SET_CONTINUOUSLY_RESYNC,
+ REPLAY_GENERAL_CMD_SET_COASTING_VTOTAL_WITHOUT_FRAME_UPDATE,
};
struct dmub_alpm_auxless_data {
@@ -4659,6 +4655,18 @@ struct dmub_rb_cmd_replay_enable_data {
* This does not support HDMI/DP2 for now.
*/
uint8_t phy_rate;
+ /**
+ * @hpo_stream_enc_inst: HPO stream encoder instance
+ */
+ uint8_t hpo_stream_enc_inst;
+ /**
+ * @hpo_link_enc_inst: HPO link encoder instance
+ */
+ uint8_t hpo_link_enc_inst;
+ /**
+ * @pad: Align structure to 4 byte boundary.
+ */
+ uint8_t pad[2];
};
/**
@@ -5272,8 +5280,8 @@ enum dmub_cmd_lsdma_type {
*/
DMUB_CMD__LSDMA_LINEAR_COPY = 1,
/**
- * LSDMA copies data from source to destination linearly in sub window
- */
+ * LSDMA copies data from source to destination linearly in sub window
+ */
DMUB_CMD__LSDMA_LINEAR_SUB_WINDOW_COPY = 2,
/**
* Send the tiled-to-tiled copy command
@@ -6836,6 +6844,29 @@ struct dmub_rb_cmd_pr_general_cmd {
};
/**
+ * Command type of a DMUB_CMD__BOOT_TIME_CRC command
+ */
+enum dmub_cmd_boot_time_crc_type {
+ DMUB_CMD__BOOT_TIME_CRC_INIT_MEM = 0
+};
+
+/**
+ * Data passed from driver to FW in a DMUB_CMD__BOOT_TIME_CRC_INIT command.
+ */
+struct dmub_cmd_boot_time_crc_init_data {
+ union dmub_addr buffer_addr;
+ uint32_t buffer_size;
+};
+
+/**
+ * Definition of a DMUB_CMD__BOOT_TIME_CRC_INIT command.
+ */
+struct dmub_rb_cmd_boot_time_crc_init {
+ struct dmub_cmd_header header;
+ struct dmub_cmd_boot_time_crc_init_data data;
+};
+
+/**
* union dmub_rb_cmd - DMUB inbox command.
*/
union dmub_rb_cmd {
@@ -7196,6 +7227,10 @@ union dmub_rb_cmd {
* Definition of a DMUB_CMD__IHC command.
*/
struct dmub_rb_cmd_ihc ihc;
+ /**
+ * Definition of a DMUB_CMD__BOOT_TIME_CRC_INIT command.
+ */
+ struct dmub_rb_cmd_boot_time_crc_init boot_time_crc_init;
};
/**
diff --git a/drivers/gpu/drm/amd/display/include/dpcd_defs.h b/drivers/gpu/drm/amd/display/include/dpcd_defs.h
index 1afa10e85eb5..4a8ca0ac1266 100644
--- a/drivers/gpu/drm/amd/display/include/dpcd_defs.h
+++ b/drivers/gpu/drm/amd/display/include/dpcd_defs.h
@@ -64,6 +64,9 @@
#ifndef DP_PR_ERROR_STATUS // can remove this once the define gets into linux drm_dp_helper.h
#define DP_PR_ERROR_STATUS 0x2020 /* DP 2.0 */
#endif /* DP_PR_ERROR_STATUS */
+#ifndef DP_PR_REPLAY_SINK_STATUS // can remove this once the define gets into linux drm_dp_helper.h
+#define DP_PR_REPLAY_SINK_STATUS 0x2022
+#endif /* DP_PR_REPLAY_SINK_STATUS */
#ifndef DP_PR_LINK_CRC_ERROR // can remove this once the define gets into linux drm_dp_helper.h
#define DP_PR_LINK_CRC_ERROR (1 << 0)
#endif /* DP_PR_LINK_CRC_ERROR */
diff --git a/drivers/gpu/drm/amd/include/asic_reg/clk/clk_15_0_0_offset.h b/drivers/gpu/drm/amd/include/asic_reg/clk/clk_15_0_0_offset.h
new file mode 100644
index 000000000000..aa3ef3a34013
--- /dev/null
+++ b/drivers/gpu/drm/amd/include/asic_reg/clk/clk_15_0_0_offset.h
@@ -0,0 +1,44 @@
+/* SPDX-License-Identifier: MIT */
+/* Copyright 2026 Advanced Micro Devices, Inc. */
+
+#ifndef _clk_15_0_0_OFFSET_HEADER
+#define _clk_15_0_0_OFFSET_HEADER
+
+// addressBlock: clk_clk8_0_SmuClkDec
+// base address: 0x6e000
+#define regCLK8_CLK0_DS_CNTL 0x4c14
+#define regCLK8_CLK0_DS_CNTL_BASE_IDX 0
+#define regCLK8_CLK1_DS_CNTL 0x4c1c
+#define regCLK8_CLK1_DS_CNTL_BASE_IDX 0
+#define regCLK8_CLK2_DS_CNTL 0x4c24
+#define regCLK8_CLK2_DS_CNTL_BASE_IDX 0
+#define regCLK8_CLK3_DS_CNTL 0x4c2c
+#define regCLK8_CLK3_DS_CNTL_BASE_IDX 0
+#define regCLK8_CLK4_DS_CNTL 0x4c34
+#define regCLK8_CLK4_DS_CNTL_BASE_IDX 0
+#define regCLK8_CLK0_BYPASS_CNTL 0x4c1a
+#define regCLK8_CLK0_BYPASS_CNTL_BASE_IDX 0
+#define regCLK8_CLK1_BYPASS_CNTL 0x4c22
+#define regCLK8_CLK1_BYPASS_CNTL_BASE_IDX 0
+#define regCLK8_CLK2_BYPASS_CNTL 0x4c2a
+#define regCLK8_CLK2_BYPASS_CNTL_BASE_IDX 0
+#define regCLK8_CLK3_BYPASS_CNTL 0x4c32
+#define regCLK8_CLK3_BYPASS_CNTL_BASE_IDX 0
+#define regCLK8_CLK4_BYPASS_CNTL 0x4c3a
+#define regCLK8_CLK4_BYPASS_CNTL_BASE_IDX 0
+#define regCLK8_CLK_TICK_CNT_CONFIG_REG 0x4c51
+#define regCLK8_CLK_TICK_CNT_CONFIG_REG_BASE_IDX 0
+#define regCLK8_CLK_TICK_CNT_STATUS 0x4c52
+#define regCLK8_CLK_TICK_CNT_STATUS_BASE_IDX 0
+#define regCLK8_CLK0_CURRENT_CNT 0x4c53
+#define regCLK8_CLK0_CURRENT_CNT_BASE_IDX 0
+#define regCLK8_CLK1_CURRENT_CNT 0x4c54
+#define regCLK8_CLK1_CURRENT_CNT_BASE_IDX 0
+#define regCLK8_CLK2_CURRENT_CNT 0x4c55
+#define regCLK8_CLK2_CURRENT_CNT_BASE_IDX 0
+#define regCLK8_CLK3_CURRENT_CNT 0x4c56
+#define regCLK8_CLK3_CURRENT_CNT_BASE_IDX 0
+#define regCLK8_CLK4_CURRENT_CNT 0x4c57
+#define regCLK8_CLK4_CURRENT_CNT_BASE_IDX 0
+
+#endif
diff --git a/drivers/gpu/drm/amd/include/asic_reg/clk/clk_15_0_0_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/clk/clk_15_0_0_sh_mask.h
new file mode 100644
index 000000000000..c78622d06e1b
--- /dev/null
+++ b/drivers/gpu/drm/amd/include/asic_reg/clk/clk_15_0_0_sh_mask.h
@@ -0,0 +1,52 @@
+/* SPDX-License-Identifier: MIT */
+/* Copyright 2026 Advanced Micro Devices, Inc. */
+
+#ifndef _clk_15_0_0_SH_MASK_HEADER
+#define _clk_15_0_0_SH_MASK_HEADER
+
+// addressBlock: clk_clk8_0_SmuClkDec
+//CLK8_CLK_TICK_CNT_CONFIG_REG
+#define CLK8_CLK_TICK_CNT_CONFIG_REG__TIMER_THRESHOLD__SHIFT 0x0
+#define CLK8_CLK_TICK_CNT_CONFIG_REG__TIMER_THRESHOLD_MASK 0xFFFFL
+//CLK8_CLK0_BYPASS_CNTL
+#define CLK8_CLK0_BYPASS_CNTL__CLK0_BYPASS_SEL__SHIFT 0x0
+#define CLK8_CLK0_BYPASS_CNTL__CLK0_BYPASS_SEL_MASK 0x00000007L
+//CLK8_CLK1_BYPASS_CNTL
+#define CLK8_CLK1_BYPASS_CNTL__CLK1_BYPASS_SEL__SHIFT 0x0
+#define CLK8_CLK1_BYPASS_CNTL__CLK1_BYPASS_SEL_MASK 0x00000007L
+//CLK8_CLK2_BYPASS_CNTL
+#define CLK8_CLK2_BYPASS_CNTL__CLK2_BYPASS_SEL__SHIFT 0x0
+#define CLK8_CLK2_BYPASS_CNTL__CLK2_BYPASS_SEL_MASK 0x00000007L
+//CLK8_CLK3_BYPASS_CNTL
+#define CLK8_CLK3_BYPASS_CNTL__CLK3_BYPASS_SEL__SHIFT 0x0
+#define CLK8_CLK3_BYPASS_CNTL__CLK3_BYPASS_SEL_MASK 0x00000007L
+//CLK8_CLK4_BYPASS_CNTL
+#define CLK8_CLK4_BYPASS_CNTL__CLK4_BYPASS_SEL__SHIFT 0x0
+#define CLK8_CLK4_BYPASS_CNTL__CLK4_BYPASS_SEL_MASK 0x00000007L
+//CLK8_CLK0_DS_CNTL
+#define CLK8_CLK0_DS_CNTL__CLK0_DS_DIV_ID__SHIFT 0x0
+#define CLK8_CLK0_DS_CNTL__CLK0_DS_DIV_ID_MASK 0x0000000FL
+#define CLK8_CLK0_DS_CNTL__CLK0_ALLOW_DS__SHIFT 0x4
+#define CLK8_CLK0_DS_CNTL__CLK0_ALLOW_DS_MASK 0x00000010L
+//CLK8_CLK1_DS_CNTL
+#define CLK8_CLK1_DS_CNTL__CLK1_DS_DIV_ID__SHIFT 0x0
+#define CLK8_CLK1_DS_CNTL__CLK1_DS_DIV_ID_MASK 0x0000000FL
+#define CLK8_CLK1_DS_CNTL__CLK1_ALLOW_DS__SHIFT 0x4
+#define CLK8_CLK1_DS_CNTL__CLK1_ALLOW_DS_MASK 0x00000010L
+//CLK8_CLK2_DS_CNTL
+#define CLK8_CLK2_DS_CNTL__CLK2_DS_DIV_ID__SHIFT 0x0
+#define CLK8_CLK2_DS_CNTL__CLK2_DS_DIV_ID_MASK 0x0000000FL
+#define CLK8_CLK2_DS_CNTL__CLK2_ALLOW_DS__SHIFT 0x4
+#define CLK8_CLK2_DS_CNTL__CLK2_ALLOW_DS_MASK 0x00000010L
+//CLK8_CLK3_DS_CNTL
+#define CLK8_CLK3_DS_CNTL__CLK3_DS_DIV_ID__SHIFT 0x0
+#define CLK8_CLK3_DS_CNTL__CLK3_DS_DIV_ID_MASK 0x0000000FL
+#define CLK8_CLK3_DS_CNTL__CLK3_ALLOW_DS__SHIFT 0x4
+#define CLK8_CLK3_DS_CNTL__CLK3_ALLOW_DS_MASK 0x00000010L
+//CLK8_CLK4_DS_CNTL
+#define CLK8_CLK4_DS_CNTL__CLK4_DS_DIV_ID__SHIFT 0x0
+#define CLK8_CLK4_DS_CNTL__CLK4_DS_DIV_ID_MASK 0x0000000FL
+#define CLK8_CLK4_DS_CNTL__CLK4_ALLOW_DS__SHIFT 0x4
+#define CLK8_CLK4_DS_CNTL__CLK4_ALLOW_DS_MASK 0x00000010L
+
+#endif
diff --git a/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_4_2_0_offset.h b/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_4_2_0_offset.h
index 825201f4e113..52fbf2dc1899 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_4_2_0_offset.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_4_2_0_offset.h
@@ -9010,6 +9010,8 @@
// base address: 0x0
#define regODM0_OPTC_INPUT_GLOBAL_CONTROL 0x1aca
#define regODM0_OPTC_INPUT_GLOBAL_CONTROL_BASE_IDX 2
+#define regODM0_OPTC_RSMU_UNDERFLOW 0x1acb
+#define regODM0_OPTC_RSMU_UNDERFLOW_BASE_IDX 2
#define regODM0_OPTC_UNDERFLOW_THRESHOLD 0x1acc
#define regODM0_OPTC_UNDERFLOW_THRESHOLD_BASE_IDX 2
#define regODM0_OPTC_DATA_SOURCE_SELECT 0x1acd
diff --git a/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_4_2_0_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_4_2_0_sh_mask.h
index 4ed96244f61b..01fb53093369 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_4_2_0_sh_mask.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/dcn/dcn_4_2_0_sh_mask.h
@@ -33583,6 +33583,15 @@
#define ODM0_OPTC_INPUT_GLOBAL_CONTROL__OPTC_UNDERFLOW_CLEAR_MASK 0x00001000L
#define ODM0_OPTC_INPUT_GLOBAL_CONTROL__OPTC_UNDERFLOW_OCCURRED_CURRENT_MASK 0x00002000L
#define ODM0_OPTC_INPUT_GLOBAL_CONTROL__OPTC_DOUBLE_BUFFER_PENDING_MASK 0x80000000L
+//ODM0_OPTC_RSMU_UNDERFLOW
+#define ODM0_OPTC_RSMU_UNDERFLOW__OPTC_RSMU_UNDERFLOW_INT_EN__SHIFT 0x0
+#define ODM0_OPTC_RSMU_UNDERFLOW__OPTC_RSMU_UNDERFLOW_OCCURRED_STATUS__SHIFT 0x1
+#define ODM0_OPTC_RSMU_UNDERFLOW__OPTC_RSMU_UNDERFLOW_INT_STATUS__SHIFT 0x2
+#define ODM0_OPTC_RSMU_UNDERFLOW__OPTC_RSMU_UNDERFLOW_CLEAR__SHIFT 0x3
+#define ODM0_OPTC_RSMU_UNDERFLOW__OPTC_RSMU_UNDERFLOW_INT_EN_MASK 0x00000001L
+#define ODM0_OPTC_RSMU_UNDERFLOW__OPTC_RSMU_UNDERFLOW_OCCURRED_STATUS_MASK 0x00000002L
+#define ODM0_OPTC_RSMU_UNDERFLOW__OPTC_RSMU_UNDERFLOW_INT_STATUS_MASK 0x00000004L
+#define ODM0_OPTC_RSMU_UNDERFLOW__OPTC_RSMU_UNDERFLOW_CLEAR_MASK 0x00000008L
//ODM0_OPTC_UNDERFLOW_THRESHOLD
#define ODM0_OPTC_UNDERFLOW_THRESHOLD__OPTC_UNDERFLOW_THRESHOLD__SHIFT 0x0
#define ODM0_OPTC_UNDERFLOW_THRESHOLD__OPTC_UNDERFLOW_THRESHOLD_MASK 0x01FFFFFFL
diff --git a/drivers/gpu/drm/amd/include/asic_reg/lsdma/lsdma_7_1_0_offset.h b/drivers/gpu/drm/amd/include/asic_reg/lsdma/lsdma_7_1_0_offset.h
new file mode 100644
index 000000000000..fa4e42a3ae9f
--- /dev/null
+++ b/drivers/gpu/drm/amd/include/asic_reg/lsdma/lsdma_7_1_0_offset.h
@@ -0,0 +1,44 @@
+/*
+ * Copyright 2026 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#ifndef _lsdma_7_1_0_OFFSET_HEADER
+#define _lsdma_7_1_0_OFFSET_HEADER
+
+#define regLSDMA_PIO_SRC_ADDR_LO 0x0080
+#define regLSDMA_PIO_SRC_ADDR_LO_BASE_IDX 0
+#define regLSDMA_PIO_SRC_ADDR_HI 0x0081
+#define regLSDMA_PIO_SRC_ADDR_HI_BASE_IDX 0
+#define regLSDMA_PIO_DST_ADDR_LO 0x0082
+#define regLSDMA_PIO_DST_ADDR_LO_BASE_IDX 0
+#define regLSDMA_PIO_DST_ADDR_HI 0x0083
+#define regLSDMA_PIO_DST_ADDR_HI_BASE_IDX 0
+#define regLSDMA_PIO_COMMAND 0x0084
+#define regLSDMA_PIO_COMMAND_BASE_IDX 0
+#define regLSDMA_PIO_CONSTFILL_DATA 0x0085
+#define regLSDMA_PIO_CONSTFILL_DATA_BASE_IDX 0
+#define regLSDMA_PIO_CONTROL 0x0086
+#define regLSDMA_PIO_CONTROL_BASE_IDX 0
+
+#define regLSDMA_PIO_STATUS 0x008a
+#define regLSDMA_PIO_STATUS_BASE_IDX 0
+
+#endif
diff --git a/drivers/gpu/drm/amd/include/asic_reg/lsdma/lsdma_7_1_0_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/lsdma/lsdma_7_1_0_sh_mask.h
new file mode 100644
index 000000000000..cf83dacf4acf
--- /dev/null
+++ b/drivers/gpu/drm/amd/include/asic_reg/lsdma/lsdma_7_1_0_sh_mask.h
@@ -0,0 +1,105 @@
+/*
+ * Copyright 2026 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#ifndef _lsdma_7_1_0_SH_MASK_HEADER
+#define _lsdma_7_1_0_SH_MASK_HEADER
+
+
+// addressBlock: lsdma0_lsdma0dec
+//LSDMA_PIO_STATUS
+#define LSDMA_PIO_STATUS__CMD_IN_FIFO__SHIFT 0x0
+#define LSDMA_PIO_STATUS__CMD_PROCESSING__SHIFT 0x3
+#define LSDMA_PIO_STATUS__ERROR_INVALID_ADDR__SHIFT 0xb
+#define LSDMA_PIO_STATUS__ERROR_ZERO_COUNT__SHIFT 0xc
+#define LSDMA_PIO_STATUS__ERROR_DRAM_ECC__SHIFT 0xd
+#define LSDMA_PIO_STATUS__ERROR_SRAM_ECC__SHIFT 0xe
+#define LSDMA_PIO_STATUS__ERROR_WRRET_NACK_GEN_ERR__SHIFT 0xf
+#define LSDMA_PIO_STATUS__ERROR_RDRET_NACK_GEN_ERR__SHIFT 0x10
+#define LSDMA_PIO_STATUS__ERROR_WRRET_NACK_PRT__SHIFT 0x11
+#define LSDMA_PIO_STATUS__ERROR_RDRET_NACK_PRT__SHIFT 0x12
+#define LSDMA_PIO_STATUS__ERROR_REQ_DROP__SHIFT 0x13
+#define LSDMA_PIO_STATUS__PIO_FIFO_EMPTY__SHIFT 0x1c
+#define LSDMA_PIO_STATUS__PIO_FIFO_FULL__SHIFT 0x1d
+#define LSDMA_PIO_STATUS__PIO_IDLE__SHIFT 0x1f
+#define LSDMA_PIO_STATUS__CMD_IN_FIFO_MASK 0x00000007L
+#define LSDMA_PIO_STATUS__CMD_PROCESSING_MASK 0x000003F8L
+#define LSDMA_PIO_STATUS__ERROR_INVALID_ADDR_MASK 0x00000800L
+#define LSDMA_PIO_STATUS__ERROR_ZERO_COUNT_MASK 0x00001000L
+#define LSDMA_PIO_STATUS__ERROR_DRAM_ECC_MASK 0x00002000L
+#define LSDMA_PIO_STATUS__ERROR_SRAM_ECC_MASK 0x00004000L
+#define LSDMA_PIO_STATUS__ERROR_WRRET_NACK_GEN_ERR_MASK 0x00008000L
+#define LSDMA_PIO_STATUS__ERROR_RDRET_NACK_GEN_ERR_MASK 0x00010000L
+#define LSDMA_PIO_STATUS__ERROR_WRRET_NACK_PRT_MASK 0x00020000L
+#define LSDMA_PIO_STATUS__ERROR_RDRET_NACK_PRT_MASK 0x00040000L
+#define LSDMA_PIO_STATUS__ERROR_REQ_DROP_MASK 0x00080000L
+#define LSDMA_PIO_STATUS__PIO_FIFO_EMPTY_MASK 0x10000000L
+#define LSDMA_PIO_STATUS__PIO_FIFO_FULL_MASK 0x20000000L
+#define LSDMA_PIO_STATUS__PIO_IDLE_MASK 0x80000000L
+//LSDMA_PIO_SRC_ADDR_LO
+#define LSDMA_PIO_SRC_ADDR_LO__SRC_ADDR_LO__SHIFT 0x0
+#define LSDMA_PIO_SRC_ADDR_LO__SRC_ADDR_LO_MASK 0xFFFFFFFFL
+//LSDMA_PIO_SRC_ADDR_HI
+#define LSDMA_PIO_SRC_ADDR_HI__SRC_ADDR_HI__SHIFT 0x0
+#define LSDMA_PIO_SRC_ADDR_HI__SRC_ADDR_HI_MASK 0xFFFFFFFFL
+//LSDMA_PIO_DST_ADDR_LO
+#define LSDMA_PIO_DST_ADDR_LO__DST_ADDR_LO__SHIFT 0x0
+#define LSDMA_PIO_DST_ADDR_LO__DST_ADDR_LO_MASK 0xFFFFFFFFL
+//LSDMA_PIO_DST_ADDR_HI
+#define LSDMA_PIO_DST_ADDR_HI__DST_ADDR_HI__SHIFT 0x0
+#define LSDMA_PIO_DST_ADDR_HI__DST_ADDR_HI_MASK 0xFFFFFFFFL
+//LSDMA_PIO_CONTROL
+#define LSDMA_PIO_CONTROL__VMID__SHIFT 0x0
+#define LSDMA_PIO_CONTROL__DST_GPA__SHIFT 0x4
+#define LSDMA_PIO_CONTROL__DST_SYS__SHIFT 0x5
+#define LSDMA_PIO_CONTROL__DST_GCC__SHIFT 0x6
+#define LSDMA_PIO_CONTROL__DST_SNOOP__SHIFT 0x7
+#define LSDMA_PIO_CONTROL__DST_REUSE_HINT__SHIFT 0x8
+#define LSDMA_PIO_CONTROL__DST_COMP_EN__SHIFT 0xa
+#define LSDMA_PIO_CONTROL__SRC_GPA__SHIFT 0x14
+#define LSDMA_PIO_CONTROL__SRC_SYS__SHIFT 0x15
+#define LSDMA_PIO_CONTROL__SRC_SNOOP__SHIFT 0x17
+#define LSDMA_PIO_CONTROL__SRC_REUSE_HINT__SHIFT 0x18
+#define LSDMA_PIO_CONTROL__SRC_COMP_EN__SHIFT 0x1a
+#define LSDMA_PIO_CONTROL__VMID_MASK 0x0000000FL
+#define LSDMA_PIO_CONTROL__DST_GPA_MASK 0x00000010L
+#define LSDMA_PIO_CONTROL__DST_SYS_MASK 0x00000020L
+#define LSDMA_PIO_CONTROL__DST_GCC_MASK 0x00000040L
+#define LSDMA_PIO_CONTROL__DST_SNOOP_MASK 0x00000080L
+#define LSDMA_PIO_CONTROL__DST_REUSE_HINT_MASK 0x00000300L
+#define LSDMA_PIO_CONTROL__DST_COMP_EN_MASK 0x00000400L
+#define LSDMA_PIO_CONTROL__SRC_GPA_MASK 0x00100000L
+#define LSDMA_PIO_CONTROL__SRC_SYS_MASK 0x00200000L
+#define LSDMA_PIO_CONTROL__SRC_SNOOP_MASK 0x00800000L
+#define LSDMA_PIO_CONTROL__SRC_REUSE_HINT_MASK 0x03000000L
+#define LSDMA_PIO_CONTROL__SRC_COMP_EN_MASK 0x04000000L
+//LSDMA_PIO_COMMAND
+#define LSDMA_PIO_COMMAND__COUNT__SHIFT 0x0
+#define LSDMA_PIO_COMMAND__RAW_WAIT__SHIFT 0x1e
+#define LSDMA_PIO_COMMAND__CONSTANT_FILL__SHIFT 0x1f
+#define LSDMA_PIO_COMMAND__COUNT_MASK 0x03FFFFFFL
+#define LSDMA_PIO_COMMAND__RAW_WAIT_MASK 0x40000000L
+#define LSDMA_PIO_COMMAND__CONSTANT_FILL_MASK 0x80000000L
+//LSDMA_PIO_CONSTFILL_DATA
+#define LSDMA_PIO_CONSTFILL_DATA__DATA__SHIFT 0x0
+#define LSDMA_PIO_CONSTFILL_DATA__DATA_MASK 0xFFFFFFFFL
+
+#endif
diff --git a/drivers/gpu/drm/amd/include/discovery.h b/drivers/gpu/drm/amd/include/discovery.h
index 710e328fad48..76c9f951bc1c 100644
--- a/drivers/gpu/drm/amd/include/discovery.h
+++ b/drivers/gpu/drm/amd/include/discovery.h
@@ -64,6 +64,19 @@ typedef struct binary_header
table_info table_list[TOTAL_TABLES];
} binary_header;
+typedef struct binary_header_v2
+{
+ /* psp structure should go at the top of this structure */
+ uint32_t binary_signature; /* 0x7, 0x14, 0x21, 0x28 */
+ uint16_t version_major; /* 0x02 */
+ uint16_t version_minor;
+ uint16_t binary_checksum; /* Byte sum of the binary after this field */
+	uint16_t binary_size; /* Binary Size */
+ uint16_t num_tables;
+ uint16_t padding;
+ table_info table_list[] __counted_by(num_tables);
+} binary_header_v2;
+
typedef struct die_info
{
uint16_t die_id;
diff --git a/drivers/gpu/drm/amd/include/kgd_pp_interface.h b/drivers/gpu/drm/amd/include/kgd_pp_interface.h
index bdf8e6ff556c..a9b73f4fd466 100644
--- a/drivers/gpu/drm/amd/include/kgd_pp_interface.h
+++ b/drivers/gpu/drm/amd/include/kgd_pp_interface.h
@@ -584,6 +584,9 @@ enum amdgpu_metrics_attr_id {
AMDGPU_METRICS_ATTR_ID_GFX_BELOW_HOST_LIMIT_THM_ACC,
AMDGPU_METRICS_ATTR_ID_GFX_LOW_UTILIZATION_ACC,
AMDGPU_METRICS_ATTR_ID_GFX_BELOW_HOST_LIMIT_TOTAL_ACC,
+ AMDGPU_METRICS_ATTR_ID_TEMPERATURE_HBM,
+ AMDGPU_METRICS_ATTR_ID_TEMPERATURE_AID,
+ AMDGPU_METRICS_ATTR_ID_TEMPERATURE_XCD,
AMDGPU_METRICS_ATTR_ID_MAX,
};
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c
index b32c053950c9..a8d63d4d1f6e 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c
@@ -2222,7 +2222,8 @@ static int smu_v13_0_0_restore_user_od_settings(struct smu_context *smu)
user_od_table->OverDriveTable.FeatureCtrlMask = BIT(PP_OD_FEATURE_GFXCLK_BIT) |
BIT(PP_OD_FEATURE_UCLK_BIT) |
BIT(PP_OD_FEATURE_GFX_VF_CURVE_BIT) |
- BIT(PP_OD_FEATURE_FAN_CURVE_BIT);
+ BIT(PP_OD_FEATURE_FAN_CURVE_BIT) |
+ BIT(PP_OD_FEATURE_ZERO_FAN_BIT);
res = smu_v13_0_0_upload_overdrive_table(smu, user_od_table);
user_od_table->OverDriveTable.FeatureCtrlMask = 0;
if (res == 0)
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_12_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_12_ppt.c
index 32d5e2170d80..54a86eb77cd5 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_12_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_12_ppt.c
@@ -49,6 +49,13 @@
#undef pr_info
#undef pr_debug
+#define hbm_stack_mask_valid(umc_mask) \
+ (((umc_mask) & 0x3) == 0x3)
+
+#define for_each_hbm_stack(stack_idx, umc_mask) \
+ for ((stack_idx) = 0; (umc_mask); \
+	     (umc_mask) >>= 2, (stack_idx)++)
+
#define SMU_13_0_12_FEA_MAP(smu_feature, smu_13_0_12_feature) \
[smu_feature] = { 1, (smu_13_0_12_feature) }
@@ -262,8 +269,9 @@ static void smu_v13_0_12_init_xgmi_data(struct smu_context *smu,
int ret;
if (smu_table->tables[SMU_TABLE_SMU_METRICS].version >= 0x13) {
- max_width = (uint8_t)static_metrics->MaxXgmiWidth;
- max_speed = (uint16_t)static_metrics->MaxXgmiBitrate;
+ max_width = (uint8_t)SMUQ10_ROUND(static_metrics->MaxXgmiWidth);
+ max_speed =
+ (uint16_t)SMUQ10_ROUND(static_metrics->MaxXgmiBitrate);
ret = 0;
} else {
MetricsTable_t *metrics = (MetricsTable_t *)smu_table->metrics_table;
@@ -834,7 +842,7 @@ void smu_v13_0_12_get_gpu_metrics(struct smu_context *smu, void **table,
struct smu_v13_0_6_gpu_metrics *gpu_metrics)
{
struct amdgpu_device *adev = smu->adev;
- int ret = 0, xcc_id, inst, i, j;
+ int ret = 0, xcc_id, inst, i, j, idx;
u8 num_jpeg_rings_gpu_metrics;
MetricsTable_t *metrics;
@@ -849,6 +857,31 @@ void smu_v13_0_12_get_gpu_metrics(struct smu_context *smu, void **table,
gpu_metrics->temperature_vrsoc =
SMUQ10_ROUND(metrics->MaxVrTemperature);
+ if (smu_v13_0_6_cap_supported(smu,
+ SMU_CAP(TEMP_AID_XCD_HBM))) {
+ if (adev->umc.active_mask) {
+ u64 mask = adev->umc.active_mask;
+ int out_idx = 0;
+ int stack_idx;
+
+ if (unlikely(hweight64(mask) / 2 > SMU_13_0_6_MAX_HBM_STACKS)) {
+			dev_warn(adev->dev, "Invalid umc mask %llu\n", mask);
+ } else {
+ for_each_hbm_stack(stack_idx, mask) {
+ if (!hbm_stack_mask_valid(mask))
+ continue;
+ gpu_metrics->temperature_hbm[out_idx++] =
+ metrics->HbmTemperature[stack_idx];
+ }
+ }
+ }
+ idx = 0;
+ for_each_inst(i, adev->aid_mask) {
+ gpu_metrics->temperature_aid[idx] = metrics->AidTemperature[i];
+ idx++;
+ }
+ }
+
gpu_metrics->average_gfx_activity =
SMUQ10_ROUND(metrics->SocketGfxBusy);
gpu_metrics->average_umc_activity =
@@ -964,6 +997,9 @@ void smu_v13_0_12_get_gpu_metrics(struct smu_context *smu, void **table,
[i] = SMUQ10_ROUND(
metrics->GfxclkBelowHostLimitTotalAcc[inst]);
}
+ if (smu_v13_0_6_cap_supported(smu,
+ SMU_CAP(TEMP_AID_XCD_HBM)))
+ gpu_metrics->temperature_xcd[i] = metrics->XcdTemperature[inst];
}
gpu_metrics->xgmi_link_width = metrics->XgmiWidth;
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c
index 2512a8ff6836..8dc8674b7ce1 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c
@@ -373,6 +373,9 @@ static void smu_v13_0_12_init_caps(struct smu_context *smu)
} else {
smu_v13_0_12_tables_fini(smu);
}
+
+ if (fw_ver >= 0x04561000)
+ smu_v13_0_6_cap_set(smu, SMU_CAP(TEMP_AID_XCD_HBM));
}
static void smu_v13_0_6_init_caps(struct smu_context *smu)
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.h b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.h
index ffb06564f830..a150fc88902c 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.h
@@ -78,6 +78,7 @@ enum smu_v13_0_6_caps {
SMU_CAP(RAS_EEPROM),
SMU_CAP(FAST_PPT),
SMU_CAP(SYSTEM_POWER_METRICS),
+ SMU_CAP(TEMP_AID_XCD_HBM),
SMU_CAP(ALL),
};
@@ -87,6 +88,8 @@ enum smu_v13_0_6_caps {
#define SMU_13_0_6_MAX_XCC 8
#define SMU_13_0_6_MAX_VCN 4
#define SMU_13_0_6_MAX_JPEG 40
+#define SMU_13_0_6_MAX_AID 4
+#define SMU_13_0_6_MAX_HBM_STACKS 8
extern void smu_v13_0_6_set_ppt_funcs(struct smu_context *smu);
bool smu_v13_0_6_cap_supported(struct smu_context *smu, enum smu_v13_0_6_caps cap);
@@ -222,7 +225,15 @@ extern const struct ras_smu_drv smu_v13_0_12_ras_smu_drv;
SMU_13_0_6_MAX_XCC); \
SMU_ARRAY(SMU_MATTR(GFX_BELOW_HOST_LIMIT_TOTAL_ACC), SMU_MUNIT(NONE), \
SMU_MTYPE(U64), gfx_below_host_limit_total_acc, \
- SMU_13_0_6_MAX_XCC);
+ SMU_13_0_6_MAX_XCC); \
+ SMU_ARRAY(SMU_MATTR(TEMPERATURE_HBM), SMU_MUNIT(TEMP_1), \
+ SMU_MTYPE(U16), temperature_hbm, \
+ SMU_13_0_6_MAX_HBM_STACKS); \
+ SMU_ARRAY(SMU_MATTR(TEMPERATURE_AID), SMU_MUNIT(TEMP_1), \
+ SMU_MTYPE(U16), temperature_aid, SMU_13_0_6_MAX_AID); \
+ SMU_ARRAY(SMU_MATTR(TEMPERATURE_XCD), SMU_MUNIT(TEMP_1), \
+		  SMU_MTYPE(U16), temperature_xcd, SMU_13_0_6_MAX_XCC);
+
DECLARE_SMU_METRICS_CLASS(smu_v13_0_6_gpu_metrics, SMU_13_0_6_METRICS_FIELDS);
void smu_v13_0_12_get_gpu_metrics(struct smu_context *smu, void **table,
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c
index f08cfa510a8a..5500a0f12f0e 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c
@@ -2224,7 +2224,8 @@ static int smu_v13_0_7_restore_user_od_settings(struct smu_context *smu)
user_od_table->OverDriveTable.FeatureCtrlMask = BIT(PP_OD_FEATURE_GFXCLK_BIT) |
BIT(PP_OD_FEATURE_UCLK_BIT) |
BIT(PP_OD_FEATURE_GFX_VF_CURVE_BIT) |
- BIT(PP_OD_FEATURE_FAN_CURVE_BIT);
+ BIT(PP_OD_FEATURE_FAN_CURVE_BIT) |
+ BIT(PP_OD_FEATURE_ZERO_FAN_BIT);
res = smu_v13_0_7_upload_overdrive_table(smu, user_od_table);
user_od_table->OverDriveTable.FeatureCtrlMask = 0;
if (res == 0)
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.c
index 9994d4369da8..73762d9b5969 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.c
@@ -2311,7 +2311,8 @@ static int smu_v14_0_2_restore_user_od_settings(struct smu_context *smu)
user_od_table->OverDriveTable.FeatureCtrlMask = BIT(PP_OD_FEATURE_GFXCLK_BIT) |
BIT(PP_OD_FEATURE_UCLK_BIT) |
BIT(PP_OD_FEATURE_GFX_VF_CURVE_BIT) |
- BIT(PP_OD_FEATURE_FAN_CURVE_BIT);
+ BIT(PP_OD_FEATURE_FAN_CURVE_BIT) |
+ BIT(PP_OD_FEATURE_ZERO_FAN_BIT);
res = smu_v14_0_2_upload_overdrive_table(smu, user_od_table);
user_od_table->OverDriveTable.FeatureCtrlMask = 0;
if (res == 0)
diff --git a/drivers/gpu/drm/bridge/analogix/Kconfig b/drivers/gpu/drm/bridge/analogix/Kconfig
index f3448b0631fe..03dc7ffe824a 100644
--- a/drivers/gpu/drm/bridge/analogix/Kconfig
+++ b/drivers/gpu/drm/bridge/analogix/Kconfig
@@ -34,7 +34,8 @@ config DRM_ANALOGIX_ANX7625
tristate "Analogix Anx7625 MIPI to DP interface support"
depends on DRM
depends on OF
- depends on TYPEC || !TYPEC
+ depends on TYPEC
+ depends on USB_ROLE_SWITCH
select DRM_DISPLAY_DP_HELPER
select DRM_DISPLAY_HDCP_HELPER
select DRM_DISPLAY_HELPER
diff --git a/drivers/gpu/drm/bridge/analogix/anx7625.c b/drivers/gpu/drm/bridge/analogix/anx7625.c
index c43519097a45..54b02242d6db 100644
--- a/drivers/gpu/drm/bridge/analogix/anx7625.c
+++ b/drivers/gpu/drm/bridge/analogix/anx7625.c
@@ -1363,6 +1363,18 @@ static void anx7625_configure_hpd(struct anx7625_data *ctx)
anx7625_hpd_timer_config(ctx);
}
+static bool anx7625_need_pd(struct anx7625_data *ctx)
+{
+ struct fwnode_handle *fwnode;
+
+ fwnode = device_get_named_child_node(ctx->dev, "connector");
+ if (!fwnode)
+ return false;
+
+ fwnode_handle_put(fwnode);
+ return true;
+}
+
static int anx7625_ocm_loading_check(struct anx7625_data *ctx)
{
int ret;
@@ -1378,7 +1390,7 @@ static int anx7625_ocm_loading_check(struct anx7625_data *ctx)
if ((ret & FLASH_LOAD_STA_CHK) != FLASH_LOAD_STA_CHK)
return -ENODEV;
- if (!ctx->typec_port)
+ if (!anx7625_need_pd(ctx))
anx7625_disable_pd_protocol(ctx);
anx7625_configure_hpd(ctx);
@@ -1483,7 +1495,6 @@ static void anx7625_start_dp_work(struct anx7625_data *ctx)
DRM_DEV_DEBUG_DRIVER(dev, "Secure OCM version=%02x\n", ret);
}
-#if IS_REACHABLE(CONFIG_TYPEC)
static u8 anx7625_checksum(u8 *buf, u8 len)
{
u8 ret = 0;
@@ -1567,6 +1578,9 @@ static void anx7625_typec_set_status(struct anx7625_data *ctx,
unsigned int intr_status,
unsigned int intr_vector)
{
+ if (!ctx->typec_port)
+ return;
+
if (intr_vector & CC_STATUS)
anx7625_typec_set_orientation(ctx);
if (intr_vector & DATA_ROLE_STATUS) {
@@ -1635,22 +1649,6 @@ static void anx7625_typec_unregister(struct anx7625_data *ctx)
usb_role_switch_put(ctx->role_sw);
typec_unregister_port(ctx->typec_port);
}
-#else
-static void anx7625_typec_set_status(struct anx7625_data *ctx,
- unsigned int intr_status,
- unsigned int intr_vector)
-{
-}
-
-static int anx7625_typec_register(struct anx7625_data *ctx)
-{
- return 0;
-}
-
-static void anx7625_typec_unregister(struct anx7625_data *ctx)
-{
-}
-#endif
static int anx7625_read_hpd_status_p0(struct anx7625_data *ctx)
{
@@ -2924,12 +2922,7 @@ static int anx7625_i2c_probe(struct i2c_client *client)
}
if (!platform->pdata.low_power_mode) {
- struct fwnode_handle *fwnode;
-
- fwnode = device_get_named_child_node(dev, "connector");
- if (fwnode)
- fwnode_handle_put(fwnode);
- else
+ if (!anx7625_need_pd(platform))
anx7625_disable_pd_protocol(platform);
anx7625_configure_hpd(platform);
diff --git a/drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-core.c b/drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-core.c
index 9392c226ff5b..945bb47c172b 100644
--- a/drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-core.c
+++ b/drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-core.c
@@ -739,12 +739,8 @@ static void cdns_mhdp_fw_cb(const struct firmware *fw, void *context)
spin_lock(&mhdp->start_lock);
bridge_attached = mhdp->bridge_attached;
spin_unlock(&mhdp->start_lock);
- if (bridge_attached) {
- if (mhdp->connector.dev)
- drm_kms_helper_hotplug_event(mhdp->bridge.dev);
- else
- drm_bridge_hpd_notify(&mhdp->bridge, cdns_mhdp_detect(mhdp));
- }
+ if (bridge_attached)
+ drm_bridge_hpd_notify(&mhdp->bridge, cdns_mhdp_detect(mhdp));
}
static int cdns_mhdp_load_firmware(struct cdns_mhdp_device *mhdp)
@@ -782,7 +778,7 @@ static ssize_t cdns_mhdp_transfer(struct drm_dp_aux *aux,
if (!ret)
continue;
- dev_err(mhdp->dev,
+ dev_dbg(mhdp->dev,
"Failed to write DPCD addr %u\n",
msg->address + i);
@@ -792,7 +788,7 @@ static ssize_t cdns_mhdp_transfer(struct drm_dp_aux *aux,
ret = cdns_mhdp_dpcd_read(mhdp, msg->address,
msg->buffer, msg->size);
if (ret) {
- dev_err(mhdp->dev,
+ dev_dbg(mhdp->dev,
"Failed to read DPCD addr %u\n",
msg->address);
@@ -1444,56 +1440,6 @@ static const struct drm_edid *cdns_mhdp_edid_read(struct cdns_mhdp_device *mhdp,
return drm_edid_read_custom(connector, cdns_mhdp_get_edid_block, mhdp);
}
-static int cdns_mhdp_get_modes(struct drm_connector *connector)
-{
- struct cdns_mhdp_device *mhdp = connector_to_mhdp(connector);
- const struct drm_edid *drm_edid;
- int num_modes;
-
- if (!mhdp->plugged)
- return 0;
-
- drm_edid = cdns_mhdp_edid_read(mhdp, connector);
-
- drm_edid_connector_update(connector, drm_edid);
-
- if (!drm_edid) {
- dev_err(mhdp->dev, "Failed to read EDID\n");
- return 0;
- }
-
- num_modes = drm_edid_connector_add_modes(connector);
- drm_edid_free(drm_edid);
-
- /*
- * HACK: Warn about unsupported display formats until we deal
- * with them correctly.
- */
- if (connector->display_info.color_formats &&
- !(connector->display_info.color_formats &
- mhdp->display_fmt.color_format))
- dev_warn(mhdp->dev,
- "%s: No supported color_format found (0x%08x)\n",
- __func__, connector->display_info.color_formats);
-
- if (connector->display_info.bpc &&
- connector->display_info.bpc < mhdp->display_fmt.bpc)
- dev_warn(mhdp->dev, "%s: Display bpc only %d < %d\n",
- __func__, connector->display_info.bpc,
- mhdp->display_fmt.bpc);
-
- return num_modes;
-}
-
-static int cdns_mhdp_connector_detect(struct drm_connector *conn,
- struct drm_modeset_acquire_ctx *ctx,
- bool force)
-{
- struct cdns_mhdp_device *mhdp = connector_to_mhdp(conn);
-
- return cdns_mhdp_detect(mhdp);
-}
-
static u32 cdns_mhdp_get_bpp(struct cdns_mhdp_display_fmt *fmt)
{
u32 bpp;
@@ -1547,114 +1493,6 @@ bool cdns_mhdp_bandwidth_ok(struct cdns_mhdp_device *mhdp,
return true;
}
-static
-enum drm_mode_status cdns_mhdp_mode_valid(struct drm_connector *conn,
- const struct drm_display_mode *mode)
-{
- struct cdns_mhdp_device *mhdp = connector_to_mhdp(conn);
-
- mutex_lock(&mhdp->link_mutex);
-
- if (!cdns_mhdp_bandwidth_ok(mhdp, mode, mhdp->link.num_lanes,
- mhdp->link.rate)) {
- mutex_unlock(&mhdp->link_mutex);
- return MODE_CLOCK_HIGH;
- }
-
- mutex_unlock(&mhdp->link_mutex);
- return MODE_OK;
-}
-
-static int cdns_mhdp_connector_atomic_check(struct drm_connector *conn,
- struct drm_atomic_state *state)
-{
- struct cdns_mhdp_device *mhdp = connector_to_mhdp(conn);
- struct drm_connector_state *old_state, *new_state;
- struct drm_crtc_state *crtc_state;
- u64 old_cp, new_cp;
-
- if (!mhdp->hdcp_supported)
- return 0;
-
- old_state = drm_atomic_get_old_connector_state(state, conn);
- new_state = drm_atomic_get_new_connector_state(state, conn);
- old_cp = old_state->content_protection;
- new_cp = new_state->content_protection;
-
- if (old_state->hdcp_content_type != new_state->hdcp_content_type &&
- new_cp != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
- new_state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
- goto mode_changed;
- }
-
- if (!new_state->crtc) {
- if (old_cp == DRM_MODE_CONTENT_PROTECTION_ENABLED)
- new_state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
- return 0;
- }
-
- if (old_cp == new_cp ||
- (old_cp == DRM_MODE_CONTENT_PROTECTION_DESIRED &&
- new_cp == DRM_MODE_CONTENT_PROTECTION_ENABLED))
- return 0;
-
-mode_changed:
- crtc_state = drm_atomic_get_new_crtc_state(state, new_state->crtc);
- crtc_state->mode_changed = true;
-
- return 0;
-}
-
-static const struct drm_connector_helper_funcs cdns_mhdp_conn_helper_funcs = {
- .detect_ctx = cdns_mhdp_connector_detect,
- .get_modes = cdns_mhdp_get_modes,
- .mode_valid = cdns_mhdp_mode_valid,
- .atomic_check = cdns_mhdp_connector_atomic_check,
-};
-
-static const struct drm_connector_funcs cdns_mhdp_conn_funcs = {
- .fill_modes = drm_helper_probe_single_connector_modes,
- .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
- .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
- .reset = drm_atomic_helper_connector_reset,
- .destroy = drm_connector_cleanup,
-};
-
-static int cdns_mhdp_connector_init(struct cdns_mhdp_device *mhdp)
-{
- u32 bus_format = MEDIA_BUS_FMT_RGB121212_1X36;
- struct drm_connector *conn = &mhdp->connector;
- struct drm_bridge *bridge = &mhdp->bridge;
- int ret;
-
- conn->polled = DRM_CONNECTOR_POLL_HPD;
-
- ret = drm_connector_init(bridge->dev, conn, &cdns_mhdp_conn_funcs,
- DRM_MODE_CONNECTOR_DisplayPort);
- if (ret) {
- dev_err(mhdp->dev, "Failed to initialize connector with drm\n");
- return ret;
- }
-
- drm_connector_helper_add(conn, &cdns_mhdp_conn_helper_funcs);
-
- ret = drm_display_info_set_bus_formats(&conn->display_info,
- &bus_format, 1);
- if (ret)
- return ret;
-
- ret = drm_connector_attach_encoder(conn, bridge->encoder);
- if (ret) {
- dev_err(mhdp->dev, "Failed to attach connector to encoder\n");
- return ret;
- }
-
- if (mhdp->hdcp_supported)
- ret = drm_connector_attach_content_protection_property(conn, true);
-
- return ret;
-}
-
static int cdns_mhdp_attach(struct drm_bridge *bridge,
struct drm_encoder *encoder,
enum drm_bridge_attach_flags flags)
@@ -1671,9 +1509,11 @@ static int cdns_mhdp_attach(struct drm_bridge *bridge,
return ret;
if (!(flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR)) {
- ret = cdns_mhdp_connector_init(mhdp);
- if (ret)
- goto aux_unregister;
+ ret = -EINVAL;
+ dev_err(mhdp->dev,
+ "Connector initialisation not supported in bridge_attach %d\n",
+ ret);
+ goto aux_unregister;
}
spin_lock(&mhdp->start_lock);
@@ -1915,17 +1755,25 @@ static void cdns_mhdp_atomic_enable(struct drm_bridge *bridge,
struct cdns_mhdp_device *mhdp = bridge_to_mhdp(bridge);
struct cdns_mhdp_bridge_state *mhdp_state;
struct drm_crtc_state *crtc_state;
- struct drm_connector *connector;
struct drm_connector_state *conn_state;
struct drm_bridge_state *new_state;
const struct drm_display_mode *mode;
u32 resp;
- int ret;
+ int ret = 0;
dev_dbg(mhdp->dev, "bridge enable\n");
mutex_lock(&mhdp->link_mutex);
+ mhdp->connector = drm_atomic_get_new_connector_for_encoder(state,
+ bridge->encoder);
+ if (WARN_ON(!mhdp->connector))
+ goto out;
+
+ conn_state = drm_atomic_get_new_connector_state(state, mhdp->connector);
+ if (WARN_ON(!conn_state))
+ goto out;
+
if (mhdp->plugged && !mhdp->link_up) {
ret = cdns_mhdp_link_up(mhdp);
if (ret < 0)
@@ -1945,15 +1793,6 @@ static void cdns_mhdp_atomic_enable(struct drm_bridge *bridge,
cdns_mhdp_reg_write(mhdp, CDNS_DPTX_CAR,
resp | CDNS_VIF_CLK_EN | CDNS_VIF_CLK_RSTN);
- connector = drm_atomic_get_new_connector_for_encoder(state,
- bridge->encoder);
- if (WARN_ON(!connector))
- goto out;
-
- conn_state = drm_atomic_get_new_connector_state(state, connector);
- if (WARN_ON(!conn_state))
- goto out;
-
if (mhdp->hdcp_supported &&
mhdp->hw_state == MHDP_HW_READY &&
conn_state->content_protection ==
@@ -2030,6 +1869,7 @@ static void cdns_mhdp_atomic_disable(struct drm_bridge *bridge,
if (mhdp->info && mhdp->info->ops && mhdp->info->ops->disable)
mhdp->info->ops->disable(mhdp);
+ mhdp->connector = NULL;
mutex_unlock(&mhdp->link_mutex);
}
@@ -2122,6 +1962,10 @@ static int cdns_mhdp_atomic_check(struct drm_bridge *bridge,
{
struct cdns_mhdp_device *mhdp = bridge_to_mhdp(bridge);
const struct drm_display_mode *mode = &crtc_state->adjusted_mode;
+ struct drm_connector_state *old_state, *new_state;
+ struct drm_atomic_state *state = crtc_state->state;
+ struct drm_connector *conn = mhdp->connector;
+ u64 old_cp, new_cp;
mutex_lock(&mhdp->link_mutex);
@@ -2141,6 +1985,25 @@ static int cdns_mhdp_atomic_check(struct drm_bridge *bridge,
if (mhdp->info)
bridge_state->input_bus_cfg.flags = *mhdp->info->input_bus_flags;
+ if (conn && mhdp->hdcp_supported) {
+ old_state = drm_atomic_get_old_connector_state(state, conn);
+ new_state = drm_atomic_get_new_connector_state(state, conn);
+ old_cp = old_state->content_protection;
+ new_cp = new_state->content_protection;
+
+ if (old_state->hdcp_content_type != new_state->hdcp_content_type &&
+ new_cp != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
+ new_state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
+ crtc_state = drm_atomic_get_new_crtc_state(state, new_state->crtc);
+ crtc_state->mode_changed = true;
+ }
+
+ if (!new_state->crtc) {
+ if (old_cp == DRM_MODE_CONTENT_PROTECTION_ENABLED)
+ new_state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
+ }
+ }
+
mutex_unlock(&mhdp->link_mutex);
return 0;
}
@@ -2161,6 +2024,25 @@ static const struct drm_edid *cdns_mhdp_bridge_edid_read(struct drm_bridge *brid
return cdns_mhdp_edid_read(mhdp, connector);
}
+static enum drm_mode_status
+cdns_mhdp_bridge_mode_valid(struct drm_bridge *bridge,
+ const struct drm_display_info *info,
+ const struct drm_display_mode *mode)
+{
+ struct cdns_mhdp_device *mhdp = bridge_to_mhdp(bridge);
+
+ mutex_lock(&mhdp->link_mutex);
+
+ if (!cdns_mhdp_bandwidth_ok(mhdp, mode, mhdp->link.num_lanes,
+ mhdp->link.rate)) {
+ mutex_unlock(&mhdp->link_mutex);
+ return MODE_CLOCK_HIGH;
+ }
+
+ mutex_unlock(&mhdp->link_mutex);
+ return MODE_OK;
+}
+
static const struct drm_bridge_funcs cdns_mhdp_bridge_funcs = {
.atomic_enable = cdns_mhdp_atomic_enable,
.atomic_disable = cdns_mhdp_atomic_disable,
@@ -2175,6 +2057,7 @@ static const struct drm_bridge_funcs cdns_mhdp_bridge_funcs = {
.edid_read = cdns_mhdp_bridge_edid_read,
.hpd_enable = cdns_mhdp_bridge_hpd_enable,
.hpd_disable = cdns_mhdp_bridge_hpd_disable,
+ .mode_valid = cdns_mhdp_bridge_mode_valid,
};
static bool cdns_mhdp_detect_hpd(struct cdns_mhdp_device *mhdp, bool *hpd_pulse)
@@ -2296,7 +2179,7 @@ static void cdns_mhdp_modeset_retry_fn(struct work_struct *work)
mhdp = container_of(work, typeof(*mhdp), modeset_retry_work);
- conn = &mhdp->connector;
+ conn = mhdp->connector;
/* Grab the locks before changing connector property */
mutex_lock(&conn->dev->mode_config.mutex);
@@ -2370,17 +2253,9 @@ static void cdns_mhdp_hpd_work(struct work_struct *work)
struct cdns_mhdp_device *mhdp = container_of(work,
struct cdns_mhdp_device,
hpd_work);
- int ret;
+ cdns_mhdp_update_link_status(mhdp);
- ret = cdns_mhdp_update_link_status(mhdp);
- if (mhdp->connector.dev) {
- if (ret < 0)
- schedule_work(&mhdp->modeset_retry_work);
- else
- drm_kms_helper_hotplug_event(mhdp->bridge.dev);
- } else {
- drm_bridge_hpd_notify(&mhdp->bridge, cdns_mhdp_detect(mhdp));
- }
+ drm_bridge_hpd_notify(&mhdp->bridge, cdns_mhdp_detect(mhdp));
}
static int cdns_mhdp_probe(struct platform_device *pdev)
diff --git a/drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-core.h b/drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-core.h
index bad2fc0c7306..b297db53ba28 100644
--- a/drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-core.h
+++ b/drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-core.h
@@ -375,7 +375,7 @@ struct cdns_mhdp_device {
*/
struct mutex link_mutex;
- struct drm_connector connector;
+ struct drm_connector *connector;
struct drm_bridge bridge;
struct cdns_mhdp_link link;
diff --git a/drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-hdcp.c b/drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-hdcp.c
index 42248f179b69..5cd0b873e16f 100644
--- a/drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-hdcp.c
+++ b/drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-hdcp.c
@@ -394,7 +394,7 @@ static int _cdns_mhdp_hdcp_disable(struct cdns_mhdp_device *mhdp)
int ret;
dev_dbg(mhdp->dev, "[%s:%d] HDCP is being disabled...\n",
- mhdp->connector.name, mhdp->connector.base.id);
+ mhdp->connector->name, mhdp->connector->base.id);
ret = cdns_mhdp_hdcp_set_config(mhdp, 0, false);
@@ -436,6 +436,10 @@ static int cdns_mhdp_hdcp_check_link(struct cdns_mhdp_device *mhdp)
int ret = 0;
mutex_lock(&mhdp->hdcp.mutex);
+
+ if (!mhdp->connector)
+ goto out;
+
if (mhdp->hdcp.value == DRM_MODE_CONTENT_PROTECTION_UNDESIRED)
goto out;
@@ -445,7 +449,7 @@ static int cdns_mhdp_hdcp_check_link(struct cdns_mhdp_device *mhdp)
dev_err(mhdp->dev,
"[%s:%d] HDCP link failed, retrying authentication\n",
- mhdp->connector.name, mhdp->connector.base.id);
+ mhdp->connector->name, mhdp->connector->base.id);
ret = _cdns_mhdp_hdcp_disable(mhdp);
if (ret) {
@@ -487,13 +491,19 @@ static void cdns_mhdp_hdcp_prop_work(struct work_struct *work)
struct cdns_mhdp_device *mhdp = container_of(hdcp,
struct cdns_mhdp_device,
hdcp);
- struct drm_device *dev = mhdp->connector.dev;
+ struct drm_device *dev = NULL;
struct drm_connector_state *state;
+ if (mhdp->connector)
+ dev = mhdp->connector->dev;
+
+ if (!dev)
+ return;
+
drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
mutex_lock(&mhdp->hdcp.mutex);
if (mhdp->hdcp.value != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
- state = mhdp->connector.state;
+ state = mhdp->connector->state;
state->content_protection = mhdp->hdcp.value;
}
mutex_unlock(&mhdp->hdcp.mutex);
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
index 845c63ca15b5..c4adad77c8d6 100644
--- a/drivers/gpu/drm/drm_fb_helper.c
+++ b/drivers/gpu/drm/drm_fb_helper.c
@@ -37,6 +37,7 @@
#include <drm/drm_fb_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_framebuffer.h>
+#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_modeset_helper_vtables.h>
#include <drm/drm_print.h>
#include <drm/drm_vblank.h>
@@ -1771,3 +1772,32 @@ int drm_fb_helper_hotplug_event(struct drm_fb_helper *fb_helper)
return 0;
}
EXPORT_SYMBOL(drm_fb_helper_hotplug_event);
+
+/**
+ * drm_fb_helper_gem_is_fb - Tests if GEM object is framebuffer
+ * @fb_helper: fb_helper instance, can be NULL
+ * @obj: The GEM object to test, can be NULL
+ *
+ * Call drm_fb_helper_gem_is_fb to test whether a DRM device's fbdev emulation
+ * uses the specified GEM object for its framebuffer. The result is always
+ * false if either pointer is NULL.
+ *
+ * Returns:
+ * True if fbdev emulation uses the provided GEM object, or false otherwise.
+ */
+bool drm_fb_helper_gem_is_fb(const struct drm_fb_helper *fb_helper,
+ const struct drm_gem_object *obj)
+{
+ const struct drm_gem_object *gem = NULL;
+
+ if (!fb_helper || !obj)
+ return false;
+ if (fb_helper->buffer && fb_helper->buffer->gem)
+ gem = fb_helper->buffer->gem;
+ else if (fb_helper->fb)
+ gem = drm_gem_fb_get_obj(fb_helper->fb, 0);
+
+ return gem == obj;
+}
+EXPORT_SYMBOL_GPL(drm_fb_helper_gem_is_fb);
+
diff --git a/drivers/gpu/drm/drm_gem_shmem_helper.c b/drivers/gpu/drm/drm_gem_shmem_helper.c
index 7b5a49935ae4..4500deef4127 100644
--- a/drivers/gpu/drm/drm_gem_shmem_helper.c
+++ b/drivers/gpu/drm/drm_gem_shmem_helper.c
@@ -265,6 +265,8 @@ void drm_gem_shmem_put_pages_locked(struct drm_gem_shmem_object *shmem)
shmem->pages_mark_dirty_on_put,
shmem->pages_mark_accessed_on_put);
shmem->pages = NULL;
+ shmem->pages_mark_accessed_on_put = false;
+ shmem->pages_mark_dirty_on_put = false;
}
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_put_pages_locked);
@@ -397,6 +399,8 @@ int drm_gem_shmem_vmap_locked(struct drm_gem_shmem_object *shmem,
} else {
iosys_map_set_vaddr(map, shmem->vaddr);
refcount_set(&shmem->vmap_use_count, 1);
+ shmem->pages_mark_accessed_on_put = true;
+ shmem->pages_mark_dirty_on_put = true;
}
}
@@ -550,59 +554,59 @@ int drm_gem_shmem_dumb_create(struct drm_file *file, struct drm_device *dev,
}
EXPORT_SYMBOL_GPL(drm_gem_shmem_dumb_create);
-static bool drm_gem_shmem_try_map_pmd(struct vm_fault *vmf, unsigned long addr,
- struct page *page)
+static vm_fault_t drm_gem_shmem_try_insert_pfn_pmd(struct vm_fault *vmf, unsigned long pfn)
{
#ifdef CONFIG_ARCH_SUPPORTS_PMD_PFNMAP
- unsigned long pfn = page_to_pfn(page);
unsigned long paddr = pfn << PAGE_SHIFT;
- bool aligned = (addr & ~PMD_MASK) == (paddr & ~PMD_MASK);
+ bool aligned = (vmf->address & ~PMD_MASK) == (paddr & ~PMD_MASK);
- if (aligned &&
- pmd_none(*vmf->pmd) &&
- folio_test_pmd_mappable(page_folio(page))) {
+ if (aligned && pmd_none(*vmf->pmd)) {
+ /* Read-only mapping; split upon write fault */
pfn &= PMD_MASK >> PAGE_SHIFT;
- if (vmf_insert_pfn_pmd(vmf, pfn, false) == VM_FAULT_NOPAGE)
- return true;
+ return vmf_insert_pfn_pmd(vmf, pfn, false);
}
#endif
- return false;
+ return 0;
}
static vm_fault_t drm_gem_shmem_fault(struct vm_fault *vmf)
{
struct vm_area_struct *vma = vmf->vma;
struct drm_gem_object *obj = vma->vm_private_data;
+ struct drm_device *dev = obj->dev;
struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
loff_t num_pages = obj->size >> PAGE_SHIFT;
- vm_fault_t ret;
+ vm_fault_t ret = VM_FAULT_SIGBUS;
struct page **pages = shmem->pages;
- pgoff_t page_offset;
+ pgoff_t page_offset = vmf->pgoff - vma->vm_pgoff; /* page offset within VMA */
+ struct page *page;
+ struct folio *folio;
unsigned long pfn;
- /* Offset to faulty address in the VMA. */
- page_offset = vmf->pgoff - vma->vm_pgoff;
-
- dma_resv_lock(shmem->base.resv, NULL);
+ dma_resv_lock(obj->resv, NULL);
- if (page_offset >= num_pages ||
- drm_WARN_ON_ONCE(obj->dev, !shmem->pages) ||
- shmem->madv < 0) {
- ret = VM_FAULT_SIGBUS;
+ if (page_offset >= num_pages || drm_WARN_ON_ONCE(dev, !shmem->pages) ||
+ shmem->madv < 0)
goto out;
- }
- if (drm_gem_shmem_try_map_pmd(vmf, vmf->address, pages[page_offset])) {
- ret = VM_FAULT_NOPAGE;
+ page = pages[page_offset];
+ if (drm_WARN_ON_ONCE(dev, !page))
goto out;
- }
+ folio = page_folio(page);
- pfn = page_to_pfn(pages[page_offset]);
- ret = vmf_insert_pfn(vma, vmf->address, pfn);
+ pfn = page_to_pfn(page);
- out:
- dma_resv_unlock(shmem->base.resv);
+ if (folio_test_pmd_mappable(folio))
+ ret = drm_gem_shmem_try_insert_pfn_pmd(vmf, pfn);
+ if (ret != VM_FAULT_NOPAGE)
+ ret = vmf_insert_pfn(vma, vmf->address, pfn);
+
+ if (ret == VM_FAULT_NOPAGE)
+ folio_mark_accessed(folio);
+
+out:
+ dma_resv_unlock(obj->resv);
return ret;
}
@@ -641,10 +645,29 @@ static void drm_gem_shmem_vm_close(struct vm_area_struct *vma)
drm_gem_vm_close(vma);
}
+static vm_fault_t drm_gem_shmem_pfn_mkwrite(struct vm_fault *vmf)
+{
+ struct vm_area_struct *vma = vmf->vma;
+ struct drm_gem_object *obj = vma->vm_private_data;
+ struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
+ loff_t num_pages = obj->size >> PAGE_SHIFT;
+ pgoff_t page_offset = vmf->pgoff - vma->vm_pgoff; /* page offset within VMA */
+
+ if (drm_WARN_ON(obj->dev, !shmem->pages || page_offset >= num_pages))
+ return VM_FAULT_SIGBUS;
+
+ file_update_time(vma->vm_file);
+
+ folio_mark_dirty(page_folio(shmem->pages[page_offset]));
+
+ return 0;
+}
+
const struct vm_operations_struct drm_gem_shmem_vm_ops = {
.fault = drm_gem_shmem_fault,
.open = drm_gem_shmem_vm_open,
.close = drm_gem_shmem_vm_close,
+ .pfn_mkwrite = drm_gem_shmem_pfn_mkwrite,
};
EXPORT_SYMBOL_GPL(drm_gem_shmem_vm_ops);
diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
index 715433187212..be976a90c5a6 100644
--- a/drivers/gpu/drm/i915/Makefile
+++ b/drivers/gpu/drm/i915/Makefile
@@ -76,9 +76,13 @@ i915-$(CONFIG_PERF_EVENTS) += \
# core display adaptation
i915-y += \
+ i915_bo.o \
i915_display_pc8.o \
+ i915_dpt.o \
+ i915_dsb_buffer.o \
i915_hdcp_gsc.o \
i915_initial_plane.o \
+ i915_overlay.o \
i915_panic.o
# "Graphics Technology" (aka we talk to the gpu)
@@ -270,13 +274,10 @@ i915-y += \
display/intel_dpll.o \
display/intel_dpll_mgr.o \
display/intel_dpt.o \
- display/intel_dpt_common.o \
display/intel_dram.o \
display/intel_drrs.o \
display/intel_dsb.o \
- display/intel_dsb_buffer.o \
display/intel_fb.o \
- display/intel_fb_bo.o \
display/intel_fb_pin.o \
display/intel_fbc.o \
display/intel_fdi.o \
diff --git a/drivers/gpu/drm/i915/display/g4x_dp.c b/drivers/gpu/drm/i915/display/g4x_dp.c
index 4cb753177fd8..d7de329abf19 100644
--- a/drivers/gpu/drm/i915/display/g4x_dp.c
+++ b/drivers/gpu/drm/i915/display/g4x_dp.c
@@ -10,7 +10,6 @@
#include <drm/drm_print.h>
#include "g4x_dp.h"
-#include "i915_reg.h"
#include "intel_audio.h"
#include "intel_backlight.h"
#include "intel_connector.h"
diff --git a/drivers/gpu/drm/i915/display/g4x_hdmi.c b/drivers/gpu/drm/i915/display/g4x_hdmi.c
index 8b22447e8e23..5fe5067c4237 100644
--- a/drivers/gpu/drm/i915/display/g4x_hdmi.c
+++ b/drivers/gpu/drm/i915/display/g4x_hdmi.c
@@ -8,7 +8,6 @@
#include <drm/drm_print.h>
#include "g4x_hdmi.h"
-#include "i915_reg.h"
#include "intel_atomic.h"
#include "intel_audio.h"
#include "intel_connector.h"
diff --git a/drivers/gpu/drm/i915/display/hsw_ips.c b/drivers/gpu/drm/i915/display/hsw_ips.c
index 008d339d5c21..cbaef3f13f00 100644
--- a/drivers/gpu/drm/i915/display/hsw_ips.c
+++ b/drivers/gpu/drm/i915/display/hsw_ips.c
@@ -6,15 +6,15 @@
#include <linux/debugfs.h>
#include <drm/drm_print.h>
+#include <drm/intel/intel_pcode_regs.h>
#include "hsw_ips.h"
-#include "i915_reg.h"
#include "intel_color_regs.h"
#include "intel_de.h"
#include "intel_display_regs.h"
#include "intel_display_rpm.h"
#include "intel_display_types.h"
-#include "intel_pcode.h"
+#include "intel_parent.h"
static void hsw_ips_enable(const struct intel_crtc_state *crtc_state)
{
@@ -39,8 +39,8 @@ static void hsw_ips_enable(const struct intel_crtc_state *crtc_state)
if (display->platform.broadwell) {
drm_WARN_ON(display->drm,
- intel_pcode_write(display->drm, DISPLAY_IPS_CONTROL,
- val | IPS_PCODE_CONTROL));
+ intel_parent_pcode_write(display, DISPLAY_IPS_CONTROL,
+ val | IPS_PCODE_CONTROL));
/*
* Quoting Art Runyan: "its not safe to expect any particular
* value in IPS_CTL bit 31 after enabling IPS through the
@@ -72,7 +72,7 @@ bool hsw_ips_disable(const struct intel_crtc_state *crtc_state)
if (display->platform.broadwell) {
drm_WARN_ON(display->drm,
- intel_pcode_write(display->drm, DISPLAY_IPS_CONTROL, 0));
+ intel_parent_pcode_write(display, DISPLAY_IPS_CONTROL, 0));
/*
* Wait for PCODE to finish disabling IPS. The BSpec specified
* 42ms timeout value leads to occasional timeouts so use 100ms
diff --git a/drivers/gpu/drm/i915/display/i9xx_plane.c b/drivers/gpu/drm/i915/display/i9xx_plane.c
index b1fecf178906..9c16753a1f3b 100644
--- a/drivers/gpu/drm/i915/display/i9xx_plane.c
+++ b/drivers/gpu/drm/i915/display/i9xx_plane.c
@@ -10,7 +10,6 @@
#include <drm/drm_fourcc.h>
#include <drm/drm_print.h>
-#include "i915_reg.h"
#include "i9xx_plane.h"
#include "i9xx_plane_regs.h"
#include "intel_atomic.h"
diff --git a/drivers/gpu/drm/i915/display/i9xx_wm.c b/drivers/gpu/drm/i915/display/i9xx_wm.c
index 39dfceb438ae..9e170e79dcf6 100644
--- a/drivers/gpu/drm/i915/display/i9xx_wm.c
+++ b/drivers/gpu/drm/i915/display/i9xx_wm.c
@@ -6,8 +6,8 @@
#include <linux/iopoll.h>
#include <drm/drm_print.h>
+#include <drm/intel/intel_gmd_misc_regs.h>
-#include "i915_reg.h"
#include "i9xx_wm.h"
#include "i9xx_wm_regs.h"
#include "intel_atomic.h"
@@ -182,8 +182,8 @@ static bool _intel_set_memory_cxsr(struct intel_display *display, bool enable)
intel_de_posting_read(display, DSPFW3(display));
} else if (display->platform.i945g || display->platform.i945gm) {
was_enabled = intel_de_read(display, FW_BLC_SELF) & FW_BLC_SELF_EN;
- val = enable ? _MASKED_BIT_ENABLE(FW_BLC_SELF_EN) :
- _MASKED_BIT_DISABLE(FW_BLC_SELF_EN);
+ val = enable ? REG_MASKED_FIELD_ENABLE(FW_BLC_SELF_EN) :
+ REG_MASKED_FIELD_DISABLE(FW_BLC_SELF_EN);
intel_de_write(display, FW_BLC_SELF, val);
intel_de_posting_read(display, FW_BLC_SELF);
} else if (display->platform.i915gm) {
@@ -193,8 +193,8 @@ static bool _intel_set_memory_cxsr(struct intel_display *display, bool enable)
* FW_BLC_SELF. What's going on?
*/
was_enabled = intel_de_read(display, INSTPM) & INSTPM_SELF_EN;
- val = enable ? _MASKED_BIT_ENABLE(INSTPM_SELF_EN) :
- _MASKED_BIT_DISABLE(INSTPM_SELF_EN);
+ val = enable ? REG_MASKED_FIELD_ENABLE(INSTPM_SELF_EN) :
+ REG_MASKED_FIELD_DISABLE(INSTPM_SELF_EN);
intel_de_write(display, INSTPM, val);
intel_de_posting_read(display, INSTPM);
} else {
diff --git a/drivers/gpu/drm/i915/display/icl_dsi.c b/drivers/gpu/drm/i915/display/icl_dsi.c
index fc265f71d72b..c04327979678 100644
--- a/drivers/gpu/drm/i915/display/icl_dsi.c
+++ b/drivers/gpu/drm/i915/display/icl_dsi.c
@@ -34,7 +34,6 @@
#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
-#include "i915_reg.h"
#include "icl_dsi.h"
#include "icl_dsi_regs.h"
#include "intel_atomic.h"
@@ -1624,12 +1623,6 @@ static int gen11_dsi_dsc_compute_config(struct intel_encoder *encoder,
if (crtc_state->pipe_bpp < 8 * 3)
return -EINVAL;
- /* FIXME: split only when necessary */
- if (crtc_state->dsc.slice_count > 1)
- crtc_state->dsc.num_streams = 2;
- else
- crtc_state->dsc.num_streams = 1;
-
/* FIXME: initialize from VBT */
vdsc_cfg->rc_model_size = DSC_RC_MODEL_SIZE_CONST;
diff --git a/drivers/gpu/drm/i915/display/intel_alpm.c b/drivers/gpu/drm/i915/display/intel_alpm.c
index 07ffee38974b..a7350ce8e716 100644
--- a/drivers/gpu/drm/i915/display/intel_alpm.c
+++ b/drivers/gpu/drm/i915/display/intel_alpm.c
@@ -15,6 +15,7 @@
#include "intel_dp_aux.h"
#include "intel_psr.h"
#include "intel_psr_regs.h"
+#include "intel_vrr.h"
#define SILENCE_PERIOD_MIN_TIME 80
#define SILENCE_PERIOD_MAX_TIME 180
@@ -43,12 +44,6 @@ bool intel_alpm_is_alpm_aux_less(struct intel_dp *intel_dp,
void intel_alpm_init(struct intel_dp *intel_dp)
{
- u8 dpcd;
-
- if (drm_dp_dpcd_readb(&intel_dp->aux, DP_RECEIVER_ALPM_CAP, &dpcd) < 0)
- return;
-
- intel_dp->alpm_dpcd = dpcd;
mutex_init(&intel_dp->alpm.lock);
}
@@ -248,14 +243,87 @@ bool intel_alpm_compute_params(struct intel_dp *intel_dp,
return true;
}
+int intel_alpm_lobf_min_guardband(struct intel_crtc_state *crtc_state)
+{
+ struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode;
+ int first_sdp_position = adjusted_mode->crtc_vtotal -
+ adjusted_mode->crtc_vsync_start;
+ int waketime_in_lines;
+
+ /*
+ * #FIXME: Need to check if io_wake_lines or aux_less_wake_lines
+ * is applicable. Currently this information is not readily
+ * available in crtc_state, so max will suffice for now.
+ */
+ waketime_in_lines = max(crtc_state->alpm_state.io_wake_lines,
+ crtc_state->alpm_state.aux_less_wake_lines);
+
+ if (!crtc_state->has_lobf)
+ return 0;
+
+ return first_sdp_position + waketime_in_lines + crtc_state->set_context_latency;
+}
+
+static bool intel_alpm_lobf_is_window1_sufficient(struct intel_crtc_state *crtc_state)
+{
+ struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode;
+ int vblank = adjusted_mode->crtc_vtotal - adjusted_mode->crtc_vdisplay;
+ int window1;
+
+ /*
+ * LOBF must be disabled if the number of lines within Window 1 is not
+ * greater than ALPM_CTL[ALPM Entry Check]
+ */
+ window1 = vblank - min(vblank,
+ crtc_state->vrr.guardband +
+ crtc_state->set_context_latency);
+
+ return window1 > crtc_state->alpm_state.check_entry_lines;
+}
+
+void intel_alpm_lobf_compute_config_late(struct intel_dp *intel_dp,
+ struct intel_crtc_state *crtc_state)
+{
+ struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode;
+ int waketime_in_lines, first_sdp_position;
+
+ if (!crtc_state->has_lobf)
+ return;
+
+ if (!intel_alpm_lobf_is_window1_sufficient(crtc_state)) {
+ crtc_state->has_lobf = false;
+ return;
+ }
+
+ /*
+ * LOBF can only be enabled if the time from the start of the SCL+Guardband
+ * window to the position of the first SDP is greater than the time it takes
+ * to wake the main link.
+ *
+ * Position of first sdp : vsync_start
+ * start of scl + guardband : vtotal - (scl + guardband)
+ * time in lines to wake main link : waketime_in_lines
+ *
+ * Position of first sdp - start of (scl + guardband) > time in lines to wake main link
+ * vsync_start - (vtotal - (scl + guardband)) > waketime_in_lines
+ * vsync_start - vtotal + scl + guardband > waketime_in_lines
+ * scl + guardband > waketime_in_lines + (vtotal - vsync_start)
+ */
+ first_sdp_position = adjusted_mode->crtc_vtotal - adjusted_mode->crtc_vsync_start;
+ if (intel_alpm_aux_less_wake_supported(intel_dp))
+ waketime_in_lines = crtc_state->alpm_state.io_wake_lines;
+ else
+ waketime_in_lines = crtc_state->alpm_state.aux_less_wake_lines;
+
+ crtc_state->has_lobf = (crtc_state->set_context_latency + crtc_state->vrr.guardband) >
+ (first_sdp_position + waketime_in_lines);
+}
+
void intel_alpm_lobf_compute_config(struct intel_dp *intel_dp,
struct intel_crtc_state *crtc_state,
struct drm_connector_state *conn_state)
{
struct intel_display *display = to_intel_display(intel_dp);
- struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode;
- int waketime_in_lines, first_sdp_position;
- int context_latency, guardband;
if (intel_dp->alpm.lobf_disable_debug) {
drm_dbg_kms(display->drm, "LOBF is disabled by debug flag\n");
@@ -277,8 +345,8 @@ void intel_alpm_lobf_compute_config(struct intel_dp *intel_dp,
if (crtc_state->has_psr)
return;
- if (crtc_state->vrr.vmin != crtc_state->vrr.vmax ||
- crtc_state->vrr.vmin != crtc_state->vrr.flipline)
+ if (!intel_vrr_always_use_vrr_tg(display) ||
+ !intel_vrr_is_fixed_rr(crtc_state))
return;
if (!(intel_alpm_aux_wake_supported(intel_dp) ||
@@ -288,17 +356,7 @@ void intel_alpm_lobf_compute_config(struct intel_dp *intel_dp,
if (!intel_alpm_compute_params(intel_dp, crtc_state))
return;
- context_latency = adjusted_mode->crtc_vblank_start - adjusted_mode->crtc_vdisplay;
- guardband = adjusted_mode->crtc_vtotal -
- adjusted_mode->crtc_vdisplay - context_latency;
- first_sdp_position = adjusted_mode->crtc_vtotal - adjusted_mode->crtc_vsync_start;
- if (intel_alpm_aux_less_wake_supported(intel_dp))
- waketime_in_lines = crtc_state->alpm_state.io_wake_lines;
- else
- waketime_in_lines = crtc_state->alpm_state.aux_less_wake_lines;
-
- crtc_state->has_lobf = (context_latency + guardband) >
- (first_sdp_position + waketime_in_lines);
+ crtc_state->has_lobf = true;
}
static void lnl_alpm_configure(struct intel_dp *intel_dp,
@@ -388,25 +446,14 @@ void intel_alpm_port_configure(struct intel_dp *intel_dp,
intel_de_write(display, PORT_ALPM_LFPS_CTL(port), lfps_ctl_val);
}
-void intel_alpm_pre_plane_update(struct intel_atomic_state *state,
- struct intel_crtc *crtc)
+void intel_alpm_lobf_disable(const struct intel_crtc_state *new_crtc_state)
{
- struct intel_display *display = to_intel_display(state);
- const struct intel_crtc_state *crtc_state =
- intel_atomic_get_new_crtc_state(state, crtc);
- const struct intel_crtc_state *old_crtc_state =
- intel_atomic_get_old_crtc_state(state, crtc);
- enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
+ struct intel_display *display = to_intel_display(new_crtc_state);
+ enum transcoder cpu_transcoder = new_crtc_state->cpu_transcoder;
struct intel_encoder *encoder;
- if (DISPLAY_VER(display) < 20)
- return;
-
- if (crtc_state->has_lobf || crtc_state->has_lobf == old_crtc_state->has_lobf)
- return;
-
for_each_intel_encoder_mask(display->drm, encoder,
- crtc_state->uapi.encoder_mask) {
+ new_crtc_state->uapi.encoder_mask) {
struct intel_dp *intel_dp;
if (!intel_encoder_is_dp(encoder))
@@ -417,12 +464,10 @@ void intel_alpm_pre_plane_update(struct intel_atomic_state *state,
if (!intel_dp_is_edp(intel_dp))
continue;
- if (old_crtc_state->has_lobf) {
- mutex_lock(&intel_dp->alpm.lock);
- intel_de_write(display, ALPM_CTL(display, cpu_transcoder), 0);
- drm_dbg_kms(display->drm, "Link off between frames (LOBF) disabled\n");
- mutex_unlock(&intel_dp->alpm.lock);
- }
+ mutex_lock(&intel_dp->alpm.lock);
+ intel_de_write(display, ALPM_CTL(display, cpu_transcoder), 0);
+ drm_dbg_kms(display->drm, "Link off between frames (LOBF) disabled\n");
+ mutex_unlock(&intel_dp->alpm.lock);
}
}
@@ -443,22 +488,13 @@ void intel_alpm_enable_sink(struct intel_dp *intel_dp,
drm_dp_dpcd_writeb(&intel_dp->aux, DP_RECEIVER_ALPM_CONFIG, val);
}
-void intel_alpm_post_plane_update(struct intel_atomic_state *state,
- struct intel_crtc *crtc)
+void intel_alpm_lobf_enable(const struct intel_crtc_state *new_crtc_state)
{
- struct intel_display *display = to_intel_display(state);
- const struct intel_crtc_state *crtc_state =
- intel_atomic_get_new_crtc_state(state, crtc);
- const struct intel_crtc_state *old_crtc_state =
- intel_atomic_get_old_crtc_state(state, crtc);
+ struct intel_display *display = to_intel_display(new_crtc_state);
struct intel_encoder *encoder;
- if (crtc_state->has_psr || !crtc_state->has_lobf ||
- crtc_state->has_lobf == old_crtc_state->has_lobf)
- return;
-
for_each_intel_encoder_mask(display->drm, encoder,
- crtc_state->uapi.encoder_mask) {
+ new_crtc_state->uapi.encoder_mask) {
struct intel_dp *intel_dp;
if (!intel_encoder_is_dp(encoder))
@@ -467,8 +503,8 @@ void intel_alpm_post_plane_update(struct intel_atomic_state *state,
intel_dp = enc_to_intel_dp(encoder);
if (intel_dp_is_edp(intel_dp)) {
- intel_alpm_enable_sink(intel_dp, crtc_state);
- intel_alpm_configure(intel_dp, crtc_state);
+ intel_alpm_enable_sink(intel_dp, new_crtc_state);
+ intel_alpm_configure(intel_dp, new_crtc_state);
}
}
}
diff --git a/drivers/gpu/drm/i915/display/intel_alpm.h b/drivers/gpu/drm/i915/display/intel_alpm.h
index c6a4ec5b9561..1cf70668ab1b 100644
--- a/drivers/gpu/drm/i915/display/intel_alpm.h
+++ b/drivers/gpu/drm/i915/display/intel_alpm.h
@@ -25,12 +25,10 @@ void intel_alpm_configure(struct intel_dp *intel_dp,
const struct intel_crtc_state *crtc_state);
void intel_alpm_enable_sink(struct intel_dp *intel_dp,
const struct intel_crtc_state *crtc_state);
-void intel_alpm_pre_plane_update(struct intel_atomic_state *state,
- struct intel_crtc *crtc);
+void intel_alpm_lobf_disable(const struct intel_crtc_state *new_crtc_state);
void intel_alpm_port_configure(struct intel_dp *intel_dp,
const struct intel_crtc_state *crtc_state);
-void intel_alpm_post_plane_update(struct intel_atomic_state *state,
- struct intel_crtc *crtc);
+void intel_alpm_lobf_enable(const struct intel_crtc_state *new_crtc_state);
void intel_alpm_lobf_debugfs_add(struct intel_connector *connector);
bool intel_alpm_aux_wake_supported(struct intel_dp *intel_dp);
bool intel_alpm_aux_less_wake_supported(struct intel_dp *intel_dp);
@@ -38,4 +36,7 @@ bool intel_alpm_is_alpm_aux_less(struct intel_dp *intel_dp,
const struct intel_crtc_state *crtc_state);
void intel_alpm_disable(struct intel_dp *intel_dp);
bool intel_alpm_get_error(struct intel_dp *intel_dp);
+void intel_alpm_lobf_compute_config_late(struct intel_dp *intel_dp,
+ struct intel_crtc_state *crtc_state);
+int intel_alpm_lobf_min_guardband(struct intel_crtc_state *crtc_state);
#endif
diff --git a/drivers/gpu/drm/i915/display/intel_audio.c b/drivers/gpu/drm/i915/display/intel_audio.c
index 5f3c175afdd2..081627e0d917 100644
--- a/drivers/gpu/drm/i915/display/intel_audio.c
+++ b/drivers/gpu/drm/i915/display/intel_audio.c
@@ -37,6 +37,7 @@
#include "intel_crtc.h"
#include "intel_de.h"
#include "intel_display_types.h"
+#include "intel_display_wa.h"
#include "intel_lpe_audio.h"
/**
@@ -184,17 +185,6 @@ static const struct hdmi_aud_ncts hdmi_aud_ncts_36bpp[] = {
{ 192000, TMDS_445_5M, 20480, 371250 },
};
-/*
- * WA_14020863754: Implement Audio Workaround
- * Corner case with Min Hblank Fix can cause audio hang
- */
-static bool needs_wa_14020863754(struct intel_display *display)
-{
- return DISPLAY_VERx100(display) == 3000 ||
- DISPLAY_VERx100(display) == 2000 ||
- DISPLAY_VERx100(display) == 1401;
-}
-
/* get AUD_CONFIG_PIXEL_CLOCK_HDMI_* value for mode */
static u32 audio_config_hdmi_pixel_clock(const struct intel_crtc_state *crtc_state)
{
@@ -440,7 +430,11 @@ static void hsw_audio_codec_disable(struct intel_encoder *encoder,
intel_de_rmw(display, HSW_AUD_PIN_ELD_CP_VLD,
AUDIO_OUTPUT_ENABLE(cpu_transcoder), 0);
- if (needs_wa_14020863754(display))
+ /*
+ * WA_14020863754: Implement Audio Workaround
+ * Corner case with Min Hblank Fix can cause audio hang
+ */
+ if (intel_display_wa(display, INTEL_DISPLAY_WA_14020863754))
intel_de_rmw(display, AUD_CHICKENBIT_REG3, DACBE_DISABLE_MIN_HBLANK_FIX, 0);
intel_audio_sdp_split_update(old_crtc_state, false);
@@ -572,7 +566,11 @@ static void hsw_audio_codec_enable(struct intel_encoder *encoder,
intel_audio_sdp_split_update(crtc_state, true);
- if (needs_wa_14020863754(display))
+ /*
+ * WA_14020863754: Implement Audio Workaround
+ * Corner case with Min Hblank Fix can cause audio hang
+ */
+ if (intel_display_wa(display, INTEL_DISPLAY_WA_14020863754))
intel_de_rmw(display, AUD_CHICKENBIT_REG3, 0, DACBE_DISABLE_MIN_HBLANK_FIX);
/* Enable audio presence detect */
diff --git a/drivers/gpu/drm/i915/display/intel_backlight.c b/drivers/gpu/drm/i915/display/intel_backlight.c
index a68fdbd2acb9..34e95f05936e 100644
--- a/drivers/gpu/drm/i915/display/intel_backlight.c
+++ b/drivers/gpu/drm/i915/display/intel_backlight.c
@@ -12,7 +12,6 @@
#include <drm/drm_file.h>
#include <drm/drm_print.h>
-#include "i915_reg.h"
#include "intel_backlight.h"
#include "intel_backlight_regs.h"
#include "intel_connector.h"
diff --git a/drivers/gpu/drm/i915/display/intel_bios.c b/drivers/gpu/drm/i915/display/intel_bios.c
index a1fa3571eca0..b6fe87c29aa7 100644
--- a/drivers/gpu/drm/i915/display/intel_bios.c
+++ b/drivers/gpu/drm/i915/display/intel_bios.c
@@ -41,6 +41,7 @@
#include "intel_display_utils.h"
#include "intel_gmbus.h"
#include "intel_rom.h"
+#include "intel_vdsc.h"
#define _INTEL_BIOS_PRIVATE
#include "intel_vbt_defs.h"
@@ -1545,6 +1546,10 @@ parse_edp(struct intel_display *display,
if (display->vbt.version >= 251)
panel->vbt.edp.dsc_disable =
panel_bool(edp->edp_dsc_disable, panel_type);
+
+ if (display->vbt.version >= 261)
+ panel->vbt.edp.pipe_joiner_enable =
+ panel_bool(edp->pipe_joiner_enable, panel_type);
}
static void
@@ -3543,12 +3548,13 @@ bool intel_bios_is_dsi_present(struct intel_display *display,
return false;
}
-static void fill_dsc(struct intel_crtc_state *crtc_state,
+static bool fill_dsc(struct intel_crtc_state *crtc_state,
struct dsc_compression_parameters_entry *dsc,
int dsc_max_bpc)
{
struct intel_display *display = to_intel_display(crtc_state);
struct drm_dsc_config *vdsc_cfg = &crtc_state->dsc.config;
+ int slices_per_line;
int bpc = 8;
vdsc_cfg->dsc_version_major = dsc->version_major;
@@ -3574,26 +3580,33 @@ static void fill_dsc(struct intel_crtc_state *crtc_state,
* throughput etc. into account.
*
* Also, per spec DSI supports 1, 2, 3 or 4 horizontal slices.
+ *
+ * FIXME: split only when necessary
*/
if (dsc->slices_per_line & BIT(2)) {
- crtc_state->dsc.slice_count = 4;
+ slices_per_line = 4;
} else if (dsc->slices_per_line & BIT(1)) {
- crtc_state->dsc.slice_count = 2;
+ slices_per_line = 2;
} else {
/* FIXME */
if (!(dsc->slices_per_line & BIT(0)))
drm_dbg_kms(display->drm,
"VBT: Unsupported DSC slice count for DSI\n");
- crtc_state->dsc.slice_count = 1;
+ slices_per_line = 1;
}
+ if (drm_WARN_ON(display->drm,
+ !intel_dsc_get_slice_config(display, 1, slices_per_line,
+ &crtc_state->dsc.slice_config)))
+ return false;
+
if (crtc_state->hw.adjusted_mode.crtc_hdisplay %
- crtc_state->dsc.slice_count != 0)
+ intel_dsc_line_slice_count(&crtc_state->dsc.slice_config) != 0)
drm_dbg_kms(display->drm,
"VBT: DSC hdisplay %d not divisible by slice count %d\n",
crtc_state->hw.adjusted_mode.crtc_hdisplay,
- crtc_state->dsc.slice_count);
+ intel_dsc_line_slice_count(&crtc_state->dsc.slice_config));
/*
* The VBT rc_buffer_block_size and rc_buffer_size definitions
@@ -3608,6 +3621,8 @@ static void fill_dsc(struct intel_crtc_state *crtc_state,
vdsc_cfg->block_pred_enable = dsc->block_prediction_enable;
vdsc_cfg->slice_height = dsc->slice_height;
+
+ return true;
}
/* FIXME: initially DSI specific */
@@ -3628,9 +3643,7 @@ bool intel_bios_get_dsc_params(struct intel_encoder *encoder,
if (!devdata->dsc)
return false;
- fill_dsc(crtc_state, devdata->dsc, dsc_max_bpc);
-
- return true;
+ return fill_dsc(crtc_state, devdata->dsc, dsc_max_bpc);
}
}
diff --git a/drivers/gpu/drm/i915/display/intel_bo.c b/drivers/gpu/drm/i915/display/intel_bo.c
index 8f372b33d48b..3b82d38a0504 100644
--- a/drivers/gpu/drm/i915/display/intel_bo.c
+++ b/drivers/gpu/drm/i915/display/intel_bo.c
@@ -1,87 +1,87 @@
// SPDX-License-Identifier: MIT
-/* Copyright © 2024 Intel Corporation */
+/* Copyright © 2026 Intel Corporation */
-#include <drm/drm_panic.h>
+#include <drm/drm_gem.h>
+#include <drm/intel/display_parent_interface.h>
-#include "gem/i915_gem_mman.h"
-#include "gem/i915_gem_object.h"
-#include "gem/i915_gem_object_frontbuffer.h"
-#include "pxp/intel_pxp.h"
-#include "i915_debugfs.h"
#include "intel_bo.h"
+#include "intel_display_core.h"
+#include "intel_display_types.h"
bool intel_bo_is_tiled(struct drm_gem_object *obj)
{
- return i915_gem_object_is_tiled(to_intel_bo(obj));
+ struct intel_display *display = to_intel_display(obj->dev);
+
+ return display->parent->bo->is_tiled && display->parent->bo->is_tiled(obj);
}
bool intel_bo_is_userptr(struct drm_gem_object *obj)
{
- return i915_gem_object_is_userptr(to_intel_bo(obj));
+ struct intel_display *display = to_intel_display(obj->dev);
+
+ return display->parent->bo->is_userptr && display->parent->bo->is_userptr(obj);
}
bool intel_bo_is_shmem(struct drm_gem_object *obj)
{
- return i915_gem_object_is_shmem(to_intel_bo(obj));
+ struct intel_display *display = to_intel_display(obj->dev);
+
+ return display->parent->bo->is_shmem && display->parent->bo->is_shmem(obj);
}
bool intel_bo_is_protected(struct drm_gem_object *obj)
{
- return i915_gem_object_is_protected(to_intel_bo(obj));
+ struct intel_display *display = to_intel_display(obj->dev);
+
+ return display->parent->bo->is_protected(obj);
}
int intel_bo_key_check(struct drm_gem_object *obj)
{
- return intel_pxp_key_check(obj, false);
+ struct intel_display *display = to_intel_display(obj->dev);
+
+ return display->parent->bo->key_check(obj);
}
int intel_bo_fb_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
{
- return i915_gem_fb_mmap(to_intel_bo(obj), vma);
-}
+ struct intel_display *display = to_intel_display(obj->dev);
-int intel_bo_read_from_page(struct drm_gem_object *obj, u64 offset, void *dst, int size)
-{
- return i915_gem_object_read_from_page(to_intel_bo(obj), offset, dst, size);
+ return display->parent->bo->fb_mmap(obj, vma);
}
-struct intel_frontbuffer *intel_bo_frontbuffer_get(struct drm_gem_object *_obj)
+int intel_bo_read_from_page(struct drm_gem_object *obj, u64 offset, void *dst, int size)
{
- struct drm_i915_gem_object *obj = to_intel_bo(_obj);
- struct i915_frontbuffer *front;
+ struct intel_display *display = to_intel_display(obj->dev);
- front = i915_gem_object_frontbuffer_get(obj);
- if (!front)
- return NULL;
-
- return &front->base;
+ return display->parent->bo->read_from_page(obj, offset, dst, size);
}
-void intel_bo_frontbuffer_ref(struct intel_frontbuffer *_front)
+void intel_bo_describe(struct seq_file *m, struct drm_gem_object *obj)
{
- struct i915_frontbuffer *front =
- container_of(_front, typeof(*front), base);
+ struct intel_display *display = to_intel_display(obj->dev);
- i915_gem_object_frontbuffer_ref(front);
+ if (display->parent->bo->describe)
+ display->parent->bo->describe(m, obj);
}
-void intel_bo_frontbuffer_put(struct intel_frontbuffer *_front)
+int intel_bo_framebuffer_init(struct drm_gem_object *obj, struct drm_mode_fb_cmd2 *mode_cmd)
{
- struct i915_frontbuffer *front =
- container_of(_front, typeof(*front), base);
+ struct intel_display *display = to_intel_display(obj->dev);
- return i915_gem_object_frontbuffer_put(front);
+ return display->parent->bo->framebuffer_init(obj, mode_cmd);
}
-void intel_bo_frontbuffer_flush_for_display(struct intel_frontbuffer *_front)
+void intel_bo_framebuffer_fini(struct drm_gem_object *obj)
{
- struct i915_frontbuffer *front =
- container_of(_front, typeof(*front), base);
+ struct intel_display *display = to_intel_display(obj->dev);
- i915_gem_object_flush_if_display(front->obj);
+ display->parent->bo->framebuffer_fini(obj);
}
-void intel_bo_describe(struct seq_file *m, struct drm_gem_object *obj)
+struct drm_gem_object *intel_bo_framebuffer_lookup(struct intel_display *display,
+ struct drm_file *filp,
+ const struct drm_mode_fb_cmd2 *user_mode_cmd)
{
- i915_debugfs_describe_obj(m, to_intel_bo(obj));
+ return display->parent->bo->framebuffer_lookup(display->drm, filp, user_mode_cmd);
}
diff --git a/drivers/gpu/drm/i915/display/intel_bo.h b/drivers/gpu/drm/i915/display/intel_bo.h
index 516a3836a6bc..aec188c706c2 100644
--- a/drivers/gpu/drm/i915/display/intel_bo.h
+++ b/drivers/gpu/drm/i915/display/intel_bo.h
@@ -6,8 +6,11 @@
#include <linux/types.h>
+struct drm_file;
struct drm_gem_object;
+struct drm_mode_fb_cmd2;
struct drm_scanout_buffer;
+struct intel_display;
struct intel_framebuffer;
struct seq_file;
struct vm_area_struct;
@@ -20,11 +23,12 @@ int intel_bo_key_check(struct drm_gem_object *obj);
int intel_bo_fb_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma);
int intel_bo_read_from_page(struct drm_gem_object *obj, u64 offset, void *dst, int size);
-struct intel_frontbuffer *intel_bo_frontbuffer_get(struct drm_gem_object *obj);
-void intel_bo_frontbuffer_ref(struct intel_frontbuffer *front);
-void intel_bo_frontbuffer_put(struct intel_frontbuffer *front);
-void intel_bo_frontbuffer_flush_for_display(struct intel_frontbuffer *front);
-
void intel_bo_describe(struct seq_file *m, struct drm_gem_object *obj);
+void intel_bo_framebuffer_fini(struct drm_gem_object *obj);
+int intel_bo_framebuffer_init(struct drm_gem_object *obj, struct drm_mode_fb_cmd2 *mode_cmd);
+struct drm_gem_object *intel_bo_framebuffer_lookup(struct intel_display *display,
+ struct drm_file *filp,
+ const struct drm_mode_fb_cmd2 *user_mode_cmd);
+
#endif /* __INTEL_BO__ */
diff --git a/drivers/gpu/drm/i915/display/intel_bw.c b/drivers/gpu/drm/i915/display/intel_bw.c
index fe48949b5880..07b4531a4376 100644
--- a/drivers/gpu/drm/i915/display/intel_bw.c
+++ b/drivers/gpu/drm/i915/display/intel_bw.c
@@ -5,8 +5,8 @@
#include <drm/drm_atomic_state_helper.h>
#include <drm/drm_print.h>
+#include <drm/intel/intel_pcode_regs.h>
-#include "i915_reg.h"
#include "intel_bw.h"
#include "intel_crtc.h"
#include "intel_display_core.h"
@@ -15,7 +15,7 @@
#include "intel_display_utils.h"
#include "intel_dram.h"
#include "intel_mchbar_regs.h"
-#include "intel_pcode.h"
+#include "intel_parent.h"
#include "intel_uncore.h"
#include "skl_watermark.h"
@@ -114,9 +114,9 @@ static int icl_pcode_read_qgv_point_info(struct intel_display *display,
u16 dclk;
int ret;
- ret = intel_pcode_read(display->drm, ICL_PCODE_MEM_SUBSYSYSTEM_INFO |
- ICL_PCODE_MEM_SS_READ_QGV_POINT_INFO(point),
- &val, &val2);
+ ret = intel_parent_pcode_read(display, ICL_PCODE_MEM_SUBSYSYSTEM_INFO |
+ ICL_PCODE_MEM_SS_READ_QGV_POINT_INFO(point),
+ &val, &val2);
if (ret)
return ret;
@@ -141,8 +141,8 @@ static int adls_pcode_read_psf_gv_point_info(struct intel_display *display,
int ret;
int i;
- ret = intel_pcode_read(display->drm, ICL_PCODE_MEM_SUBSYSYSTEM_INFO |
- ADL_PCODE_MEM_SS_READ_PSF_GV_INFO, &val, NULL);
+ ret = intel_parent_pcode_read(display, ICL_PCODE_MEM_SUBSYSYSTEM_INFO |
+ ADL_PCODE_MEM_SS_READ_PSF_GV_INFO, &val, NULL);
if (ret)
return ret;
@@ -189,11 +189,11 @@ static int icl_pcode_restrict_qgv_points(struct intel_display *display,
return 0;
/* bspec says to keep retrying for at least 1 ms */
- ret = intel_pcode_request(display->drm, ICL_PCODE_SAGV_DE_MEM_SS_CONFIG,
- points_mask,
- ICL_PCODE_REP_QGV_MASK | ADLS_PCODE_REP_PSF_MASK,
- ICL_PCODE_REP_QGV_SAFE | ADLS_PCODE_REP_PSF_SAFE,
- 1);
+ ret = intel_parent_pcode_request(display, ICL_PCODE_SAGV_DE_MEM_SS_CONFIG,
+ points_mask,
+ ICL_PCODE_REP_QGV_MASK | ADLS_PCODE_REP_PSF_MASK,
+ ICL_PCODE_REP_QGV_SAFE | ADLS_PCODE_REP_PSF_SAFE,
+ 1);
if (ret < 0) {
drm_err(display->drm,
diff --git a/drivers/gpu/drm/i915/display/intel_casf.c b/drivers/gpu/drm/i915/display/intel_casf.c
index 95339b496f24..b167af31de5b 100644
--- a/drivers/gpu/drm/i915/display/intel_casf.c
+++ b/drivers/gpu/drm/i915/display/intel_casf.c
@@ -3,7 +3,6 @@
#include <drm/drm_print.h>
-#include "i915_reg.h"
#include "intel_casf.h"
#include "intel_casf_regs.h"
#include "intel_de.h"
@@ -116,6 +115,12 @@ int intel_casf_compute_config(struct intel_crtc_state *crtc_state)
return 0;
}
+ /* CASF with joiner not supported in hardware */
+ if (crtc_state->joiner_pipes) {
+ drm_dbg_kms(display->drm, "CASF not supported with joiner\n");
+ return -EINVAL;
+ }
+
crtc_state->hw.casf_params.casf_enable = true;
/*
diff --git a/drivers/gpu/drm/i915/display/intel_cdclk.c b/drivers/gpu/drm/i915/display/intel_cdclk.c
index f5946e677c93..121a12c5b8ac 100644
--- a/drivers/gpu/drm/i915/display/intel_cdclk.c
+++ b/drivers/gpu/drm/i915/display/intel_cdclk.c
@@ -27,9 +27,9 @@
#include <drm/drm_fixed.h>
#include <drm/drm_print.h>
+#include <drm/intel/intel_pcode_regs.h>
#include "hsw_ips.h"
-#include "i915_reg.h"
#include "intel_atomic.h"
#include "intel_audio.h"
#include "intel_cdclk.h"
@@ -42,8 +42,8 @@
#include "intel_display_wa.h"
#include "intel_dram.h"
#include "intel_mchbar_regs.h"
+#include "intel_parent.h"
#include "intel_pci_config.h"
-#include "intel_pcode.h"
#include "intel_plane.h"
#include "intel_psr.h"
#include "intel_step.h"
@@ -888,7 +888,7 @@ static void bdw_set_cdclk(struct intel_display *display,
"trying to change cdclk frequency with cdclk not enabled\n"))
return;
- ret = intel_pcode_write(display->drm, BDW_PCODE_DISPLAY_FREQ_CHANGE_REQ, 0x0);
+ ret = intel_parent_pcode_write(display, BDW_PCODE_DISPLAY_FREQ_CHANGE_REQ, 0x0);
if (ret) {
drm_err(display->drm,
"failed to inform pcode about cdclk change\n");
@@ -918,8 +918,8 @@ static void bdw_set_cdclk(struct intel_display *display,
if (ret)
drm_err(display->drm, "Switching back to LCPLL failed\n");
- intel_pcode_write(display->drm, HSW_PCODE_DE_WRITE_FREQ_REQ,
- cdclk_config->voltage_level);
+ intel_parent_pcode_write(display, HSW_PCODE_DE_WRITE_FREQ_REQ,
+ cdclk_config->voltage_level);
intel_de_write(display, CDCLK_FREQ,
DIV_ROUND_CLOSEST(cdclk, 1000) - 1);
@@ -1175,10 +1175,10 @@ static void skl_set_cdclk(struct intel_display *display,
drm_WARN_ON_ONCE(display->drm,
display->platform.skylake && vco == 8640000);
- ret = intel_pcode_request(display->drm, SKL_PCODE_CDCLK_CONTROL,
- SKL_CDCLK_PREPARE_FOR_CHANGE,
- SKL_CDCLK_READY_FOR_CHANGE,
- SKL_CDCLK_READY_FOR_CHANGE, 3);
+ ret = intel_parent_pcode_request(display, SKL_PCODE_CDCLK_CONTROL,
+ SKL_CDCLK_PREPARE_FOR_CHANGE,
+ SKL_CDCLK_READY_FOR_CHANGE,
+ SKL_CDCLK_READY_FOR_CHANGE, 3);
if (ret) {
drm_err(display->drm,
"Failed to inform PCU about cdclk change (%d)\n", ret);
@@ -1221,8 +1221,8 @@ static void skl_set_cdclk(struct intel_display *display,
intel_de_posting_read(display, CDCLK_CTL);
/* inform PCU of the change */
- intel_pcode_write(display->drm, SKL_PCODE_CDCLK_CONTROL,
- cdclk_config->voltage_level);
+ intel_parent_pcode_write(display, SKL_PCODE_CDCLK_CONTROL,
+ cdclk_config->voltage_level);
intel_update_cdclk(display);
}
@@ -1870,7 +1870,7 @@ static void icl_cdclk_pll_disable(struct intel_display *display)
* after the PLL is enabled (which is already done as part of the
* normal flow of _bxt_set_cdclk()).
*/
- if (intel_display_wa(display, 13012396614))
+ if (intel_display_wa(display, INTEL_DISPLAY_WA_13012396614))
intel_de_rmw(display, CDCLK_CTL, MDCLK_SOURCE_SEL_MASK, MDCLK_SOURCE_SEL_CD2XCLK);
intel_de_rmw(display, BXT_DE_PLL_ENABLE,
@@ -2186,7 +2186,8 @@ static u32 bxt_cdclk_ctl(struct intel_display *display,
* icl_cdclk_pll_disable(). Here we are just making sure
* we keep the expected value.
*/
- if (intel_display_wa(display, 13012396614) && vco == 0)
+ if (intel_display_wa(display, INTEL_DISPLAY_WA_13012396614) &&
+ vco == 0)
val |= MDCLK_SOURCE_SEL_CD2XCLK;
else
val |= xe2lpd_mdclk_source_sel(display);
@@ -2247,18 +2248,18 @@ static void bxt_set_cdclk(struct intel_display *display,
if (DISPLAY_VER(display) >= 14 || display->platform.dg2)
; /* NOOP */
else if (DISPLAY_VER(display) >= 11)
- ret = intel_pcode_request(display->drm, SKL_PCODE_CDCLK_CONTROL,
- SKL_CDCLK_PREPARE_FOR_CHANGE,
- SKL_CDCLK_READY_FOR_CHANGE,
- SKL_CDCLK_READY_FOR_CHANGE, 3);
+ ret = intel_parent_pcode_request(display, SKL_PCODE_CDCLK_CONTROL,
+ SKL_CDCLK_PREPARE_FOR_CHANGE,
+ SKL_CDCLK_READY_FOR_CHANGE,
+ SKL_CDCLK_READY_FOR_CHANGE, 3);
else
/*
* BSpec requires us to wait up to 150usec, but that leads to
* timeouts; the 2ms used here is based on experiment.
*/
- ret = intel_pcode_write_timeout(display->drm,
- HSW_PCODE_DE_WRITE_FREQ_REQ,
- 0x80000000, 2);
+ ret = intel_parent_pcode_write_timeout(display,
+ HSW_PCODE_DE_WRITE_FREQ_REQ,
+ 0x80000000, 2);
if (ret) {
drm_err(display->drm,
@@ -2287,8 +2288,8 @@ static void bxt_set_cdclk(struct intel_display *display,
* Display versions 14 and beyond
*/;
else if (DISPLAY_VER(display) >= 11 && !display->platform.dg2)
- ret = intel_pcode_write(display->drm, SKL_PCODE_CDCLK_CONTROL,
- cdclk_config->voltage_level);
+ ret = intel_parent_pcode_write(display, SKL_PCODE_CDCLK_CONTROL,
+ cdclk_config->voltage_level);
if (DISPLAY_VER(display) < 11) {
/*
* The timeout isn't specified, the 2ms used here is based on
@@ -2296,9 +2297,9 @@ static void bxt_set_cdclk(struct intel_display *display,
* FIXME: Waiting for the request completion could be delayed
* until the next PCODE request based on BSpec.
*/
- ret = intel_pcode_write_timeout(display->drm,
- HSW_PCODE_DE_WRITE_FREQ_REQ,
- cdclk_config->voltage_level, 2);
+ ret = intel_parent_pcode_write_timeout(display,
+ HSW_PCODE_DE_WRITE_FREQ_REQ,
+ cdclk_config->voltage_level, 2);
}
if (ret) {
drm_err(display->drm,
@@ -2598,11 +2599,11 @@ static void intel_pcode_notify(struct intel_display *display,
if (pipe_count_update_valid)
update_mask |= DISPLAY_TO_PCODE_PIPE_COUNT_VALID;
- ret = intel_pcode_request(display->drm, SKL_PCODE_CDCLK_CONTROL,
- SKL_CDCLK_PREPARE_FOR_CHANGE |
- update_mask,
- SKL_CDCLK_READY_FOR_CHANGE,
- SKL_CDCLK_READY_FOR_CHANGE, 3);
+ ret = intel_parent_pcode_request(display, SKL_PCODE_CDCLK_CONTROL,
+ SKL_CDCLK_PREPARE_FOR_CHANGE |
+ update_mask,
+ SKL_CDCLK_READY_FOR_CHANGE,
+ SKL_CDCLK_READY_FOR_CHANGE, 3);
if (ret)
drm_err(display->drm,
"Failed to inform PCU about display config (err %d)\n",
@@ -4006,7 +4007,7 @@ void intel_init_cdclk_hooks(struct intel_display *display)
display->cdclk.table = dg2_cdclk_table;
} else if (display->platform.alderlake_p) {
/* Wa_22011320316:adl-p[a0] */
- if (display->platform.alderlake_p && IS_DISPLAY_STEP(display, STEP_A0, STEP_B0)) {
+ if (intel_display_wa(display, INTEL_DISPLAY_WA_22011320316)) {
display->cdclk.table = adlp_a_step_cdclk_table;
display->funcs.cdclk = &tgl_cdclk_funcs;
} else if (display->platform.alderlake_p_raptorlake_u) {
diff --git a/drivers/gpu/drm/i915/display/intel_crt.c b/drivers/gpu/drm/i915/display/intel_crt.c
index 59d637c6a187..6aa6a1dd6e1b 100644
--- a/drivers/gpu/drm/i915/display/intel_crt.c
+++ b/drivers/gpu/drm/i915/display/intel_crt.c
@@ -33,6 +33,7 @@
#include <drm/drm_edid.h>
#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
+#include <video/vga.h>
#include "intel_connector.h"
#include "intel_crt.h"
@@ -55,6 +56,7 @@
#include "intel_pch_display.h"
#include "intel_pch_refclk.h"
#include "intel_pfit.h"
+#include "intel_vga.h"
/* Here's the desired hotplug mode */
#define ADPA_HOTPLUG_BITS (ADPA_CRT_HOTPLUG_ENABLE | \
@@ -691,6 +693,11 @@ static bool intel_crt_detect_ddc(struct drm_connector *connector)
return ret;
}
+static bool intel_crt_sense_above_threshold(struct intel_display *display)
+{
+ return intel_vga_read(display, VGA_IS0_R, true) & (1 << 4);
+}
+
static enum drm_connector_status
intel_crt_load_detect(struct intel_crt *crt, enum pipe pipe)
{
@@ -702,7 +709,6 @@ intel_crt_load_detect(struct intel_crt *crt, enum pipe pipe)
u32 vsample;
u32 vblank, vblank_start, vblank_end;
u32 dsl;
- u8 st00;
enum drm_connector_status status;
drm_dbg_kms(display->drm, "starting load-detect on CRT\n");
@@ -736,8 +742,8 @@ intel_crt_load_detect(struct intel_crt *crt, enum pipe pipe)
* border color for Color info.
*/
intel_crtc_wait_for_next_vblank(intel_crtc_for_pipe(display, pipe));
- st00 = intel_de_read8(display, _VGA_MSR_WRITE);
- status = ((st00 & (1 << 4)) != 0) ?
+
+ status = intel_crt_sense_above_threshold(display) ?
connector_status_connected :
connector_status_disconnected;
@@ -777,15 +783,13 @@ intel_crt_load_detect(struct intel_crt *crt, enum pipe pipe)
while ((dsl = intel_de_read(display, PIPEDSL(display, pipe))) <= vsample)
;
/*
- * Watch ST00 for an entire scanline
+ * Watch sense for an entire scanline
*/
detect = 0;
count = 0;
do {
count++;
- /* Read the ST00 VGA status register */
- st00 = intel_de_read8(display, _VGA_MSR_WRITE);
- if (st00 & (1 << 4))
+ if (intel_crt_sense_above_threshold(display))
detect++;
} while ((intel_de_read(display, PIPEDSL(display, pipe)) == dsl));
diff --git a/drivers/gpu/drm/i915/display/intel_crt_regs.h b/drivers/gpu/drm/i915/display/intel_crt_regs.h
index 571a67ae9afa..9a93020b9a7e 100644
--- a/drivers/gpu/drm/i915/display/intel_crt_regs.h
+++ b/drivers/gpu/drm/i915/display/intel_crt_regs.h
@@ -45,6 +45,4 @@
#define ADPA_VSYNC_ACTIVE_HIGH REG_BIT(4)
#define ADPA_HSYNC_ACTIVE_HIGH REG_BIT(3)
-#define _VGA_MSR_WRITE _MMIO(0x3c2)
-
#endif /* __INTEL_CRT_REGS_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_crtc.c b/drivers/gpu/drm/i915/display/intel_crtc.c
index 53378d2dcbec..b8189cd5d864 100644
--- a/drivers/gpu/drm/i915/display/intel_crtc.c
+++ b/drivers/gpu/drm/i915/display/intel_crtc.c
@@ -747,7 +747,9 @@ void intel_pipe_update_end(struct intel_atomic_state *state,
* which would cause the next frame to terminate already at vmin
* vblank start instead of vmax vblank start.
*/
- if (!state->base.legacy_cursor_update)
+ if (!state->base.legacy_cursor_update ||
+ (intel_psr_use_trans_push(new_crtc_state) &&
+ !new_crtc_state->vrr.enable))
intel_vrr_send_push(NULL, new_crtc_state);
local_irq_enable();
diff --git a/drivers/gpu/drm/i915/display/intel_cursor.c b/drivers/gpu/drm/i915/display/intel_cursor.c
index 2c5d917fbd7e..18d1014de361 100644
--- a/drivers/gpu/drm/i915/display/intel_cursor.c
+++ b/drivers/gpu/drm/i915/display/intel_cursor.c
@@ -19,6 +19,7 @@
#include "intel_display.h"
#include "intel_display_types.h"
#include "intel_display_utils.h"
+#include "intel_display_wa.h"
#include "intel_fb.h"
#include "intel_fb_pin.h"
#include "intel_frontbuffer.h"
@@ -424,7 +425,7 @@ static u32 i9xx_cursor_ctl(const struct intel_plane_state *plane_state)
cntl |= MCURSOR_ROTATE_180;
/* Wa_22012358565:adl-p */
- if (DISPLAY_VER(display) == 13)
+ if (intel_display_wa(display, INTEL_DISPLAY_WA_22012358565))
cntl |= MCURSOR_ARB_SLOTS(1);
return cntl;
diff --git a/drivers/gpu/drm/i915/display/intel_cx0_phy.c b/drivers/gpu/drm/i915/display/intel_cx0_phy.c
index 7288065d2461..6a471c021c0e 100644
--- a/drivers/gpu/drm/i915/display/intel_cx0_phy.c
+++ b/drivers/gpu/drm/i915/display/intel_cx0_phy.c
@@ -18,6 +18,7 @@
#include "intel_display_types.h"
#include "intel_display_utils.h"
#include "intel_dp.h"
+#include "intel_dpll.h"
#include "intel_hdmi.h"
#include "intel_lt_phy.h"
#include "intel_panel.h"
@@ -127,8 +128,8 @@ static void intel_cx0_phy_transaction_end(struct intel_encoder *encoder, struct
intel_display_power_put(display, POWER_DOMAIN_DC_OFF, wakeref);
}
-void intel_clear_response_ready_flag(struct intel_encoder *encoder,
- int lane)
+void intel_cx0_clear_response_ready_flag(struct intel_encoder *encoder,
+ int lane)
{
struct intel_display *display = to_intel_display(encoder);
@@ -155,7 +156,7 @@ void intel_cx0_bus_reset(struct intel_encoder *encoder, int lane)
return;
}
- intel_clear_response_ready_flag(encoder, lane);
+ intel_cx0_clear_response_ready_flag(encoder, lane);
}
int intel_cx0_wait_for_ack(struct intel_encoder *encoder,
@@ -222,6 +223,8 @@ static int __intel_cx0_read_once(struct intel_encoder *encoder,
return -ETIMEDOUT;
}
+ intel_cx0_clear_response_ready_flag(encoder, lane);
+
intel_de_write(display, XELPDP_PORT_M2P_MSGBUS_CTL(display, port, lane),
XELPDP_PORT_M2P_TRANSACTION_PENDING |
XELPDP_PORT_M2P_COMMAND_READ |
@@ -231,7 +234,7 @@ static int __intel_cx0_read_once(struct intel_encoder *encoder,
if (ack < 0)
return ack;
- intel_clear_response_ready_flag(encoder, lane);
+ intel_cx0_clear_response_ready_flag(encoder, lane);
/*
* FIXME: Workaround to let HW to settle
@@ -293,6 +296,8 @@ static int __intel_cx0_write_once(struct intel_encoder *encoder,
return -ETIMEDOUT;
}
+ intel_cx0_clear_response_ready_flag(encoder, lane);
+
intel_de_write(display, XELPDP_PORT_M2P_MSGBUS_CTL(display, port, lane),
XELPDP_PORT_M2P_TRANSACTION_PENDING |
(committed ? XELPDP_PORT_M2P_COMMAND_WRITE_COMMITTED :
@@ -321,7 +326,7 @@ static int __intel_cx0_write_once(struct intel_encoder *encoder,
return -EINVAL;
}
- intel_clear_response_ready_flag(encoder, lane);
+ intel_cx0_clear_response_ready_flag(encoder, lane);
/*
* FIXME: Workaround to let HW to settle
@@ -547,7 +552,6 @@ void intel_cx0_phy_set_signal_levels(struct intel_encoder *encoder,
*/
static const struct intel_c10pll_state mtl_c10_dp_rbr = {
- .clock = 162000,
.tx = 0x10,
.cmn = 0x21,
.pll[0] = 0xB4,
@@ -573,7 +577,6 @@ static const struct intel_c10pll_state mtl_c10_dp_rbr = {
};
static const struct intel_c10pll_state mtl_c10_edp_r216 = {
- .clock = 216000,
.tx = 0x10,
.cmn = 0x21,
.pll[0] = 0x4,
@@ -599,7 +602,6 @@ static const struct intel_c10pll_state mtl_c10_edp_r216 = {
};
static const struct intel_c10pll_state mtl_c10_edp_r243 = {
- .clock = 243000,
.tx = 0x10,
.cmn = 0x21,
.pll[0] = 0x34,
@@ -625,7 +627,6 @@ static const struct intel_c10pll_state mtl_c10_edp_r243 = {
};
static const struct intel_c10pll_state mtl_c10_dp_hbr1 = {
- .clock = 270000,
.tx = 0x10,
.cmn = 0x21,
.pll[0] = 0xF4,
@@ -651,7 +652,6 @@ static const struct intel_c10pll_state mtl_c10_dp_hbr1 = {
};
static const struct intel_c10pll_state mtl_c10_edp_r324 = {
- .clock = 324000,
.tx = 0x10,
.cmn = 0x21,
.pll[0] = 0xB4,
@@ -677,7 +677,6 @@ static const struct intel_c10pll_state mtl_c10_edp_r324 = {
};
static const struct intel_c10pll_state mtl_c10_edp_r432 = {
- .clock = 432000,
.tx = 0x10,
.cmn = 0x21,
.pll[0] = 0x4,
@@ -703,7 +702,6 @@ static const struct intel_c10pll_state mtl_c10_edp_r432 = {
};
static const struct intel_c10pll_state mtl_c10_dp_hbr2 = {
- .clock = 540000,
.tx = 0x10,
.cmn = 0x21,
.pll[0] = 0xF4,
@@ -729,7 +727,6 @@ static const struct intel_c10pll_state mtl_c10_dp_hbr2 = {
};
static const struct intel_c10pll_state mtl_c10_edp_r675 = {
- .clock = 675000,
.tx = 0x10,
.cmn = 0x21,
.pll[0] = 0xB4,
@@ -755,7 +752,6 @@ static const struct intel_c10pll_state mtl_c10_edp_r675 = {
};
static const struct intel_c10pll_state mtl_c10_dp_hbr3 = {
- .clock = 810000,
.tx = 0x10,
.cmn = 0x21,
.pll[0] = 0x34,
@@ -780,30 +776,62 @@ static const struct intel_c10pll_state mtl_c10_dp_hbr3 = {
.pll[19] = 0x23,
};
-static const struct intel_c10pll_state * const mtl_c10_dp_tables[] = {
- &mtl_c10_dp_rbr,
- &mtl_c10_dp_hbr1,
- &mtl_c10_dp_hbr2,
- &mtl_c10_dp_hbr3,
- NULL,
+struct intel_cx0pll_params {
+ const char *name;
+ bool is_c10;
+ bool is_hdmi;
+ int clock_rate;
+ union {
+ const struct intel_c10pll_state *c10;
+ const struct intel_c20pll_state *c20;
+ };
};
-static const struct intel_c10pll_state * const mtl_c10_edp_tables[] = {
- &mtl_c10_dp_rbr,
- &mtl_c10_edp_r216,
- &mtl_c10_edp_r243,
- &mtl_c10_dp_hbr1,
- &mtl_c10_edp_r324,
- &mtl_c10_edp_r432,
- &mtl_c10_dp_hbr2,
- &mtl_c10_edp_r675,
- &mtl_c10_dp_hbr3,
- NULL,
+#define __C10PLL_PARAMS(__is_hdmi, __clock_rate, __state) { \
+ .name = __stringify(__state), \
+ .is_c10 = true, \
+ .is_hdmi = __is_hdmi, \
+ .clock_rate = __clock_rate, \
+ .c10 = &__state, \
+}
+
+#define __C20PLL_PARAMS(__is_hdmi, __clock_rate, __state) { \
+ .name = __stringify(__state), \
+ .is_c10 = false, \
+ .is_hdmi = __is_hdmi, \
+ .clock_rate = __clock_rate, \
+ .c20 = &__state, \
+}
+
+#define C10PLL_HDMI_PARAMS(__clock_rate, __state) __C10PLL_PARAMS(true, __clock_rate, __state)
+#define C10PLL_DP_PARAMS(__clock_rate, __state) __C10PLL_PARAMS(false, __clock_rate, __state)
+
+#define C20PLL_HDMI_PARAMS(__clock_rate, __state) __C20PLL_PARAMS(true, __clock_rate, __state)
+#define C20PLL_DP_PARAMS(__clock_rate, __state) __C20PLL_PARAMS(false, __clock_rate, __state)
+
+static const struct intel_cx0pll_params mtl_c10_dp_tables[] = {
+ C10PLL_DP_PARAMS(162000, mtl_c10_dp_rbr),
+ C10PLL_DP_PARAMS(270000, mtl_c10_dp_hbr1),
+ C10PLL_DP_PARAMS(540000, mtl_c10_dp_hbr2),
+ C10PLL_DP_PARAMS(810000, mtl_c10_dp_hbr3),
+ {}
+};
+
+static const struct intel_cx0pll_params mtl_c10_edp_tables[] = {
+ C10PLL_DP_PARAMS(162000, mtl_c10_dp_rbr),
+ C10PLL_DP_PARAMS(216000, mtl_c10_edp_r216),
+ C10PLL_DP_PARAMS(243000, mtl_c10_edp_r243),
+ C10PLL_DP_PARAMS(270000, mtl_c10_dp_hbr1),
+ C10PLL_DP_PARAMS(324000, mtl_c10_edp_r324),
+ C10PLL_DP_PARAMS(432000, mtl_c10_edp_r432),
+ C10PLL_DP_PARAMS(540000, mtl_c10_dp_hbr2),
+ C10PLL_DP_PARAMS(675000, mtl_c10_edp_r675),
+ C10PLL_DP_PARAMS(810000, mtl_c10_dp_hbr3),
+ {}
};
/* C20 basic DP 1.4 tables */
static const struct intel_c20pll_state mtl_c20_dp_rbr = {
- .clock = 162000,
.tx = { 0xbe88, /* tx cfg0 */
0x5800, /* tx cfg1 */
0x0000, /* tx cfg2 */
@@ -828,7 +856,6 @@ static const struct intel_c20pll_state mtl_c20_dp_rbr = {
};
static const struct intel_c20pll_state mtl_c20_dp_hbr1 = {
- .clock = 270000,
.tx = { 0xbe88, /* tx cfg0 */
0x4800, /* tx cfg1 */
0x0000, /* tx cfg2 */
@@ -853,7 +880,6 @@ static const struct intel_c20pll_state mtl_c20_dp_hbr1 = {
};
static const struct intel_c20pll_state mtl_c20_dp_hbr2 = {
- .clock = 540000,
.tx = { 0xbe88, /* tx cfg0 */
0x4800, /* tx cfg1 */
0x0000, /* tx cfg2 */
@@ -878,7 +904,6 @@ static const struct intel_c20pll_state mtl_c20_dp_hbr2 = {
};
static const struct intel_c20pll_state mtl_c20_dp_hbr3 = {
- .clock = 810000,
.tx = { 0xbe88, /* tx cfg0 */
0x4800, /* tx cfg1 */
0x0000, /* tx cfg2 */
@@ -904,7 +929,6 @@ static const struct intel_c20pll_state mtl_c20_dp_hbr3 = {
/* C20 basic DP 2.0 tables */
static const struct intel_c20pll_state mtl_c20_dp_uhbr10 = {
- .clock = 1000000, /* 10 Gbps */
.tx = { 0xbe21, /* tx cfg0 */
0xe800, /* tx cfg1 */
0x0000, /* tx cfg2 */
@@ -928,7 +952,6 @@ static const struct intel_c20pll_state mtl_c20_dp_uhbr10 = {
};
static const struct intel_c20pll_state mtl_c20_dp_uhbr13_5 = {
- .clock = 1350000, /* 13.5 Gbps */
.tx = { 0xbea0, /* tx cfg0 */
0x4800, /* tx cfg1 */
0x0000, /* tx cfg2 */
@@ -953,7 +976,6 @@ static const struct intel_c20pll_state mtl_c20_dp_uhbr13_5 = {
};
static const struct intel_c20pll_state mtl_c20_dp_uhbr20 = {
- .clock = 2000000, /* 20 Gbps */
.tx = { 0xbe20, /* tx cfg0 */
0x4800, /* tx cfg1 */
0x0000, /* tx cfg2 */
@@ -976,15 +998,15 @@ static const struct intel_c20pll_state mtl_c20_dp_uhbr20 = {
},
};
-static const struct intel_c20pll_state * const mtl_c20_dp_tables[] = {
- &mtl_c20_dp_rbr,
- &mtl_c20_dp_hbr1,
- &mtl_c20_dp_hbr2,
- &mtl_c20_dp_hbr3,
- &mtl_c20_dp_uhbr10,
- &mtl_c20_dp_uhbr13_5,
- &mtl_c20_dp_uhbr20,
- NULL,
+static const struct intel_cx0pll_params mtl_c20_dp_tables[] = {
+ C20PLL_DP_PARAMS(162000, mtl_c20_dp_rbr),
+ C20PLL_DP_PARAMS(270000, mtl_c20_dp_hbr1),
+ C20PLL_DP_PARAMS(540000, mtl_c20_dp_hbr2),
+ C20PLL_DP_PARAMS(810000, mtl_c20_dp_hbr3),
+ C20PLL_DP_PARAMS(1000000, mtl_c20_dp_uhbr10),
+ C20PLL_DP_PARAMS(1350000, mtl_c20_dp_uhbr13_5),
+ C20PLL_DP_PARAMS(2000000, mtl_c20_dp_uhbr20),
+ {}
};
/*
@@ -992,7 +1014,6 @@ static const struct intel_c20pll_state * const mtl_c20_dp_tables[] = {
*/
static const struct intel_c20pll_state xe2hpd_c20_edp_r216 = {
- .clock = 216000,
.tx = { 0xbe88,
0x4800,
0x0000,
@@ -1017,7 +1038,6 @@ static const struct intel_c20pll_state xe2hpd_c20_edp_r216 = {
};
static const struct intel_c20pll_state xe2hpd_c20_edp_r243 = {
- .clock = 243000,
.tx = { 0xbe88,
0x4800,
0x0000,
@@ -1042,7 +1062,6 @@ static const struct intel_c20pll_state xe2hpd_c20_edp_r243 = {
};
static const struct intel_c20pll_state xe2hpd_c20_edp_r324 = {
- .clock = 324000,
.tx = { 0xbe88,
0x4800,
0x0000,
@@ -1067,7 +1086,6 @@ static const struct intel_c20pll_state xe2hpd_c20_edp_r324 = {
};
static const struct intel_c20pll_state xe2hpd_c20_edp_r432 = {
- .clock = 432000,
.tx = { 0xbe88,
0x4800,
0x0000,
@@ -1092,7 +1110,6 @@ static const struct intel_c20pll_state xe2hpd_c20_edp_r432 = {
};
static const struct intel_c20pll_state xe2hpd_c20_edp_r675 = {
- .clock = 675000,
.tx = { 0xbe88,
0x4800,
0x0000,
@@ -1116,21 +1133,20 @@ static const struct intel_c20pll_state xe2hpd_c20_edp_r675 = {
},
};
-static const struct intel_c20pll_state * const xe2hpd_c20_edp_tables[] = {
- &mtl_c20_dp_rbr,
- &xe2hpd_c20_edp_r216,
- &xe2hpd_c20_edp_r243,
- &mtl_c20_dp_hbr1,
- &xe2hpd_c20_edp_r324,
- &xe2hpd_c20_edp_r432,
- &mtl_c20_dp_hbr2,
- &xe2hpd_c20_edp_r675,
- &mtl_c20_dp_hbr3,
- NULL,
+static const struct intel_cx0pll_params xe2hpd_c20_edp_tables[] = {
+ C20PLL_DP_PARAMS(162000, mtl_c20_dp_rbr),
+ C20PLL_DP_PARAMS(216000, xe2hpd_c20_edp_r216),
+ C20PLL_DP_PARAMS(243000, xe2hpd_c20_edp_r243),
+ C20PLL_DP_PARAMS(270000, mtl_c20_dp_hbr1),
+ C20PLL_DP_PARAMS(324000, xe2hpd_c20_edp_r324),
+ C20PLL_DP_PARAMS(432000, xe2hpd_c20_edp_r432),
+ C20PLL_DP_PARAMS(540000, mtl_c20_dp_hbr2),
+ C20PLL_DP_PARAMS(675000, xe2hpd_c20_edp_r675),
+ C20PLL_DP_PARAMS(810000, mtl_c20_dp_hbr3),
+ {}
};
static const struct intel_c20pll_state xe2hpd_c20_dp_uhbr13_5 = {
- .clock = 1350000, /* 13.5 Gbps */
.tx = { 0xbea0, /* tx cfg0 */
0x4800, /* tx cfg1 */
0x0000, /* tx cfg2 */
@@ -1154,30 +1170,30 @@ static const struct intel_c20pll_state xe2hpd_c20_dp_uhbr13_5 = {
},
};
-static const struct intel_c20pll_state * const xe2hpd_c20_dp_tables[] = {
- &mtl_c20_dp_rbr,
- &mtl_c20_dp_hbr1,
- &mtl_c20_dp_hbr2,
- &mtl_c20_dp_hbr3,
- &mtl_c20_dp_uhbr10,
- &xe2hpd_c20_dp_uhbr13_5,
- NULL,
-};
-
-static const struct intel_c20pll_state * const xe3lpd_c20_dp_edp_tables[] = {
- &mtl_c20_dp_rbr,
- &xe2hpd_c20_edp_r216,
- &xe2hpd_c20_edp_r243,
- &mtl_c20_dp_hbr1,
- &xe2hpd_c20_edp_r324,
- &xe2hpd_c20_edp_r432,
- &mtl_c20_dp_hbr2,
- &xe2hpd_c20_edp_r675,
- &mtl_c20_dp_hbr3,
- &mtl_c20_dp_uhbr10,
- &xe2hpd_c20_dp_uhbr13_5,
- &mtl_c20_dp_uhbr20,
- NULL,
+static const struct intel_cx0pll_params xe2hpd_c20_dp_tables[] = {
+ C20PLL_DP_PARAMS(162000, mtl_c20_dp_rbr),
+ C20PLL_DP_PARAMS(270000, mtl_c20_dp_hbr1),
+ C20PLL_DP_PARAMS(540000, mtl_c20_dp_hbr2),
+ C20PLL_DP_PARAMS(810000, mtl_c20_dp_hbr3),
+ C20PLL_DP_PARAMS(1000000, mtl_c20_dp_uhbr10),
+ C20PLL_DP_PARAMS(1350000, xe2hpd_c20_dp_uhbr13_5),
+ {}
+};
+
+static const struct intel_cx0pll_params xe3lpd_c20_dp_edp_tables[] = {
+ C20PLL_DP_PARAMS(162000, mtl_c20_dp_rbr),
+ C20PLL_DP_PARAMS(216000, xe2hpd_c20_edp_r216),
+ C20PLL_DP_PARAMS(243000, xe2hpd_c20_edp_r243),
+ C20PLL_DP_PARAMS(270000, mtl_c20_dp_hbr1),
+ C20PLL_DP_PARAMS(324000, xe2hpd_c20_edp_r324),
+ C20PLL_DP_PARAMS(432000, xe2hpd_c20_edp_r432),
+ C20PLL_DP_PARAMS(540000, mtl_c20_dp_hbr2),
+ C20PLL_DP_PARAMS(675000, xe2hpd_c20_edp_r675),
+ C20PLL_DP_PARAMS(810000, mtl_c20_dp_hbr3),
+ C20PLL_DP_PARAMS(1000000, mtl_c20_dp_uhbr10),
+ C20PLL_DP_PARAMS(1350000, xe2hpd_c20_dp_uhbr13_5),
+ C20PLL_DP_PARAMS(2000000, mtl_c20_dp_uhbr20),
+ {}
};
/*
@@ -1185,7 +1201,6 @@ static const struct intel_c20pll_state * const xe3lpd_c20_dp_edp_tables[] = {
*/
static const struct intel_c10pll_state mtl_c10_hdmi_25_2 = {
- .clock = 25200,
.tx = 0x10,
.cmn = 0x1,
.pll[0] = 0x4,
@@ -1211,7 +1226,6 @@ static const struct intel_c10pll_state mtl_c10_hdmi_25_2 = {
};
static const struct intel_c10pll_state mtl_c10_hdmi_27_0 = {
- .clock = 27000,
.tx = 0x10,
.cmn = 0x1,
.pll[0] = 0x34,
@@ -1237,7 +1251,6 @@ static const struct intel_c10pll_state mtl_c10_hdmi_27_0 = {
};
static const struct intel_c10pll_state mtl_c10_hdmi_74_25 = {
- .clock = 74250,
.tx = 0x10,
.cmn = 0x1,
.pll[0] = 0xF4,
@@ -1263,7 +1276,6 @@ static const struct intel_c10pll_state mtl_c10_hdmi_74_25 = {
};
static const struct intel_c10pll_state mtl_c10_hdmi_148_5 = {
- .clock = 148500,
.tx = 0x10,
.cmn = 0x1,
.pll[0] = 0xF4,
@@ -1289,7 +1301,6 @@ static const struct intel_c10pll_state mtl_c10_hdmi_148_5 = {
};
static const struct intel_c10pll_state mtl_c10_hdmi_594 = {
- .clock = 594000,
.tx = 0x10,
.cmn = 0x1,
.pll[0] = 0xF4,
@@ -1316,7 +1327,6 @@ static const struct intel_c10pll_state mtl_c10_hdmi_594 = {
/* Precomputed C10 HDMI PLL tables */
static const struct intel_c10pll_state mtl_c10_hdmi_27027 = {
- .clock = 27027,
.tx = 0x10,
.cmn = 0x1,
.pll[0] = 0x34, .pll[1] = 0x00, .pll[2] = 0xC0, .pll[3] = 0x00, .pll[4] = 0x00,
@@ -1326,7 +1336,6 @@ static const struct intel_c10pll_state mtl_c10_hdmi_27027 = {
};
static const struct intel_c10pll_state mtl_c10_hdmi_28320 = {
- .clock = 28320,
.tx = 0x10,
.cmn = 0x1,
.pll[0] = 0x04, .pll[1] = 0x00, .pll[2] = 0xCC, .pll[3] = 0x00, .pll[4] = 0x00,
@@ -1336,7 +1345,6 @@ static const struct intel_c10pll_state mtl_c10_hdmi_28320 = {
};
static const struct intel_c10pll_state mtl_c10_hdmi_30240 = {
- .clock = 30240,
.tx = 0x10,
.cmn = 0x1,
.pll[0] = 0x04, .pll[1] = 0x00, .pll[2] = 0xDC, .pll[3] = 0x00, .pll[4] = 0x00,
@@ -1346,7 +1354,6 @@ static const struct intel_c10pll_state mtl_c10_hdmi_30240 = {
};
static const struct intel_c10pll_state mtl_c10_hdmi_31500 = {
- .clock = 31500,
.tx = 0x10,
.cmn = 0x1,
.pll[0] = 0xF4, .pll[1] = 0x00, .pll[2] = 0x62, .pll[3] = 0x00, .pll[4] = 0x00,
@@ -1356,7 +1363,6 @@ static const struct intel_c10pll_state mtl_c10_hdmi_31500 = {
};
static const struct intel_c10pll_state mtl_c10_hdmi_36000 = {
- .clock = 36000,
.tx = 0x10,
.cmn = 0x1,
.pll[0] = 0xC4, .pll[1] = 0x00, .pll[2] = 0x76, .pll[3] = 0x00, .pll[4] = 0x00,
@@ -1366,7 +1372,6 @@ static const struct intel_c10pll_state mtl_c10_hdmi_36000 = {
};
static const struct intel_c10pll_state mtl_c10_hdmi_40000 = {
- .clock = 40000,
.tx = 0x10,
.cmn = 0x1,
.pll[0] = 0xB4, .pll[1] = 0x00, .pll[2] = 0x86, .pll[3] = 0x00, .pll[4] = 0x00,
@@ -1376,7 +1381,6 @@ static const struct intel_c10pll_state mtl_c10_hdmi_40000 = {
};
static const struct intel_c10pll_state mtl_c10_hdmi_49500 = {
- .clock = 49500,
.tx = 0x10,
.cmn = 0x1,
.pll[0] = 0x74, .pll[1] = 0x00, .pll[2] = 0xAE, .pll[3] = 0x00, .pll[4] = 0x00,
@@ -1386,7 +1390,6 @@ static const struct intel_c10pll_state mtl_c10_hdmi_49500 = {
};
static const struct intel_c10pll_state mtl_c10_hdmi_50000 = {
- .clock = 50000,
.tx = 0x10,
.cmn = 0x1,
.pll[0] = 0x74, .pll[1] = 0x00, .pll[2] = 0xB0, .pll[3] = 0x00, .pll[4] = 0x00,
@@ -1396,7 +1399,6 @@ static const struct intel_c10pll_state mtl_c10_hdmi_50000 = {
};
static const struct intel_c10pll_state mtl_c10_hdmi_57284 = {
- .clock = 57284,
.tx = 0x10,
.cmn = 0x1,
.pll[0] = 0x34, .pll[1] = 0x00, .pll[2] = 0xCE, .pll[3] = 0x00, .pll[4] = 0x00,
@@ -1406,7 +1408,6 @@ static const struct intel_c10pll_state mtl_c10_hdmi_57284 = {
};
static const struct intel_c10pll_state mtl_c10_hdmi_58000 = {
- .clock = 58000,
.tx = 0x10,
.cmn = 0x1,
.pll[0] = 0x34, .pll[1] = 0x00, .pll[2] = 0xD0, .pll[3] = 0x00, .pll[4] = 0x00,
@@ -1416,7 +1417,6 @@ static const struct intel_c10pll_state mtl_c10_hdmi_58000 = {
};
static const struct intel_c10pll_state mtl_c10_hdmi_65000 = {
- .clock = 65000,
.tx = 0x10,
.cmn = 0x1,
.pll[0] = 0xF4, .pll[1] = 0x00, .pll[2] = 0x66, .pll[3] = 0x00, .pll[4] = 0x00,
@@ -1426,7 +1426,6 @@ static const struct intel_c10pll_state mtl_c10_hdmi_65000 = {
};
static const struct intel_c10pll_state mtl_c10_hdmi_71000 = {
- .clock = 71000,
.tx = 0x10,
.cmn = 0x1,
.pll[0] = 0xF4, .pll[1] = 0x00, .pll[2] = 0x72, .pll[3] = 0x00, .pll[4] = 0x00,
@@ -1436,7 +1435,6 @@ static const struct intel_c10pll_state mtl_c10_hdmi_71000 = {
};
static const struct intel_c10pll_state mtl_c10_hdmi_74176 = {
- .clock = 74176,
.tx = 0x10,
.cmn = 0x1,
.pll[0] = 0xF4, .pll[1] = 0x00, .pll[2] = 0x7A, .pll[3] = 0x00, .pll[4] = 0x00,
@@ -1446,7 +1444,6 @@ static const struct intel_c10pll_state mtl_c10_hdmi_74176 = {
};
static const struct intel_c10pll_state mtl_c10_hdmi_75000 = {
- .clock = 75000,
.tx = 0x10,
.cmn = 0x1,
.pll[0] = 0xF4, .pll[1] = 0x00, .pll[2] = 0x7C, .pll[3] = 0x00, .pll[4] = 0x00,
@@ -1456,7 +1453,6 @@ static const struct intel_c10pll_state mtl_c10_hdmi_75000 = {
};
static const struct intel_c10pll_state mtl_c10_hdmi_78750 = {
- .clock = 78750,
.tx = 0x10,
.cmn = 0x1,
.pll[0] = 0xB4, .pll[1] = 0x00, .pll[2] = 0x84, .pll[3] = 0x00, .pll[4] = 0x00,
@@ -1466,7 +1462,6 @@ static const struct intel_c10pll_state mtl_c10_hdmi_78750 = {
};
static const struct intel_c10pll_state mtl_c10_hdmi_85500 = {
- .clock = 85500,
.tx = 0x10,
.cmn = 0x1,
.pll[0] = 0xB4, .pll[1] = 0x00, .pll[2] = 0x92, .pll[3] = 0x00, .pll[4] = 0x00,
@@ -1476,7 +1471,6 @@ static const struct intel_c10pll_state mtl_c10_hdmi_85500 = {
};
static const struct intel_c10pll_state mtl_c10_hdmi_88750 = {
- .clock = 88750,
.tx = 0x10,
.cmn = 0x1,
.pll[0] = 0x74, .pll[1] = 0x00, .pll[2] = 0x98, .pll[3] = 0x00, .pll[4] = 0x00,
@@ -1486,7 +1480,6 @@ static const struct intel_c10pll_state mtl_c10_hdmi_88750 = {
};
static const struct intel_c10pll_state mtl_c10_hdmi_106500 = {
- .clock = 106500,
.tx = 0x10,
.cmn = 0x1,
.pll[0] = 0x34, .pll[1] = 0x00, .pll[2] = 0xBC, .pll[3] = 0x00, .pll[4] = 0x00,
@@ -1496,7 +1489,6 @@ static const struct intel_c10pll_state mtl_c10_hdmi_106500 = {
};
static const struct intel_c10pll_state mtl_c10_hdmi_108000 = {
- .clock = 108000,
.tx = 0x10,
.cmn = 0x1,
.pll[0] = 0x34, .pll[1] = 0x00, .pll[2] = 0xC0, .pll[3] = 0x00, .pll[4] = 0x00,
@@ -1506,7 +1498,6 @@ static const struct intel_c10pll_state mtl_c10_hdmi_108000 = {
};
static const struct intel_c10pll_state mtl_c10_hdmi_115500 = {
- .clock = 115500,
.tx = 0x10,
.cmn = 0x1,
.pll[0] = 0x34, .pll[1] = 0x00, .pll[2] = 0xD0, .pll[3] = 0x00, .pll[4] = 0x00,
@@ -1516,7 +1507,6 @@ static const struct intel_c10pll_state mtl_c10_hdmi_115500 = {
};
static const struct intel_c10pll_state mtl_c10_hdmi_119000 = {
- .clock = 119000,
.tx = 0x10,
.cmn = 0x1,
.pll[0] = 0x34, .pll[1] = 0x00, .pll[2] = 0xD6, .pll[3] = 0x00, .pll[4] = 0x00,
@@ -1526,7 +1516,6 @@ static const struct intel_c10pll_state mtl_c10_hdmi_119000 = {
};
static const struct intel_c10pll_state mtl_c10_hdmi_135000 = {
- .clock = 135000,
.tx = 0x10,
.cmn = 0x1,
.pll[0] = 0xF4, .pll[1] = 0x00, .pll[2] = 0x6C, .pll[3] = 0x00, .pll[4] = 0x00,
@@ -1536,7 +1525,6 @@ static const struct intel_c10pll_state mtl_c10_hdmi_135000 = {
};
static const struct intel_c10pll_state mtl_c10_hdmi_138500 = {
- .clock = 138500,
.tx = 0x10,
.cmn = 0x1,
.pll[0] = 0xF4, .pll[1] = 0x00, .pll[2] = 0x70, .pll[3] = 0x00, .pll[4] = 0x00,
@@ -1546,7 +1534,6 @@ static const struct intel_c10pll_state mtl_c10_hdmi_138500 = {
};
static const struct intel_c10pll_state mtl_c10_hdmi_147160 = {
- .clock = 147160,
.tx = 0x10,
.cmn = 0x1,
.pll[0] = 0xF4, .pll[1] = 0x00, .pll[2] = 0x78, .pll[3] = 0x00, .pll[4] = 0x00,
@@ -1556,7 +1543,6 @@ static const struct intel_c10pll_state mtl_c10_hdmi_147160 = {
};
static const struct intel_c10pll_state mtl_c10_hdmi_148352 = {
- .clock = 148352,
.tx = 0x10,
.cmn = 0x1,
.pll[0] = 0xF4, .pll[1] = 0x00, .pll[2] = 0x7A, .pll[3] = 0x00, .pll[4] = 0x00,
@@ -1566,7 +1552,6 @@ static const struct intel_c10pll_state mtl_c10_hdmi_148352 = {
};
static const struct intel_c10pll_state mtl_c10_hdmi_154000 = {
- .clock = 154000,
.tx = 0x10,
.cmn = 0x1,
.pll[0] = 0xB4, .pll[1] = 0x00, .pll[2] = 0x80, .pll[3] = 0x00, .pll[4] = 0x00,
@@ -1576,7 +1561,6 @@ static const struct intel_c10pll_state mtl_c10_hdmi_154000 = {
};
static const struct intel_c10pll_state mtl_c10_hdmi_162000 = {
- .clock = 162000,
.tx = 0x10,
.cmn = 0x1,
.pll[0] = 0xB4, .pll[1] = 0x00, .pll[2] = 0x88, .pll[3] = 0x00, .pll[4] = 0x00,
@@ -1586,7 +1570,6 @@ static const struct intel_c10pll_state mtl_c10_hdmi_162000 = {
};
static const struct intel_c10pll_state mtl_c10_hdmi_167000 = {
- .clock = 167000,
.tx = 0x10,
.cmn = 0x1,
.pll[0] = 0xB4, .pll[1] = 0x00, .pll[2] = 0x8C, .pll[3] = 0x00, .pll[4] = 0x00,
@@ -1596,7 +1579,6 @@ static const struct intel_c10pll_state mtl_c10_hdmi_167000 = {
};
static const struct intel_c10pll_state mtl_c10_hdmi_197802 = {
- .clock = 197802,
.tx = 0x10,
.cmn = 0x1,
.pll[0] = 0x74, .pll[1] = 0x00, .pll[2] = 0xAE, .pll[3] = 0x00, .pll[4] = 0x00,
@@ -1606,7 +1588,6 @@ static const struct intel_c10pll_state mtl_c10_hdmi_197802 = {
};
static const struct intel_c10pll_state mtl_c10_hdmi_198000 = {
- .clock = 198000,
.tx = 0x10,
.cmn = 0x1,
.pll[0] = 0x74, .pll[1] = 0x00, .pll[2] = 0xAE, .pll[3] = 0x00, .pll[4] = 0x00,
@@ -1616,7 +1597,6 @@ static const struct intel_c10pll_state mtl_c10_hdmi_198000 = {
};
static const struct intel_c10pll_state mtl_c10_hdmi_209800 = {
- .clock = 209800,
.tx = 0x10,
.cmn = 0x1,
.pll[0] = 0x34, .pll[1] = 0x00, .pll[2] = 0xBA, .pll[3] = 0x00, .pll[4] = 0x00,
@@ -1626,7 +1606,6 @@ static const struct intel_c10pll_state mtl_c10_hdmi_209800 = {
};
static const struct intel_c10pll_state mtl_c10_hdmi_241500 = {
- .clock = 241500,
.tx = 0x10,
.cmn = 0x1,
.pll[0] = 0x34, .pll[1] = 0x00, .pll[2] = 0xDA, .pll[3] = 0x00, .pll[4] = 0x00,
@@ -1636,7 +1615,6 @@ static const struct intel_c10pll_state mtl_c10_hdmi_241500 = {
};
static const struct intel_c10pll_state mtl_c10_hdmi_262750 = {
- .clock = 262750,
.tx = 0x10,
.cmn = 0x1,
.pll[0] = 0xF4, .pll[1] = 0x00, .pll[2] = 0x68, .pll[3] = 0x00, .pll[4] = 0x00,
@@ -1646,7 +1624,6 @@ static const struct intel_c10pll_state mtl_c10_hdmi_262750 = {
};
static const struct intel_c10pll_state mtl_c10_hdmi_268500 = {
- .clock = 268500,
.tx = 0x10,
.cmn = 0x1,
.pll[0] = 0xF4, .pll[1] = 0x00, .pll[2] = 0x6A, .pll[3] = 0x00, .pll[4] = 0x00,
@@ -1656,7 +1633,6 @@ static const struct intel_c10pll_state mtl_c10_hdmi_268500 = {
};
static const struct intel_c10pll_state mtl_c10_hdmi_296703 = {
- .clock = 296703,
.tx = 0x10,
.cmn = 0x1,
.pll[0] = 0xF4, .pll[1] = 0x00, .pll[2] = 0x7A, .pll[3] = 0x00, .pll[4] = 0x00,
@@ -1666,7 +1642,6 @@ static const struct intel_c10pll_state mtl_c10_hdmi_296703 = {
};
static const struct intel_c10pll_state mtl_c10_hdmi_297000 = {
- .clock = 297000,
.tx = 0x10,
.cmn = 0x1,
.pll[0] = 0xF4, .pll[1] = 0x00, .pll[2] = 0x7A, .pll[3] = 0x00, .pll[4] = 0x00,
@@ -1676,7 +1651,6 @@ static const struct intel_c10pll_state mtl_c10_hdmi_297000 = {
};
static const struct intel_c10pll_state mtl_c10_hdmi_319750 = {
- .clock = 319750,
.tx = 0x10,
.cmn = 0x1,
.pll[0] = 0xB4, .pll[1] = 0x00, .pll[2] = 0x86, .pll[3] = 0x00, .pll[4] = 0x00,
@@ -1686,7 +1660,6 @@ static const struct intel_c10pll_state mtl_c10_hdmi_319750 = {
};
static const struct intel_c10pll_state mtl_c10_hdmi_497750 = {
- .clock = 497750,
.tx = 0x10,
.cmn = 0x1,
.pll[0] = 0x34, .pll[1] = 0x00, .pll[2] = 0xE2, .pll[3] = 0x00, .pll[4] = 0x00,
@@ -1696,7 +1669,6 @@ static const struct intel_c10pll_state mtl_c10_hdmi_497750 = {
};
static const struct intel_c10pll_state mtl_c10_hdmi_592000 = {
- .clock = 592000,
.tx = 0x10,
.cmn = 0x1,
.pll[0] = 0xF4, .pll[1] = 0x00, .pll[2] = 0x7A, .pll[3] = 0x00, .pll[4] = 0x00,
@@ -1706,7 +1678,6 @@ static const struct intel_c10pll_state mtl_c10_hdmi_592000 = {
};
static const struct intel_c10pll_state mtl_c10_hdmi_593407 = {
- .clock = 593407,
.tx = 0x10,
.cmn = 0x1,
.pll[0] = 0xF4, .pll[1] = 0x00, .pll[2] = 0x7A, .pll[3] = 0x00, .pll[4] = 0x00,
@@ -1715,82 +1686,56 @@ static const struct intel_c10pll_state mtl_c10_hdmi_593407 = {
.pll[15] = 0x08, .pll[16] = 0x08, .pll[17] = 0x8F, .pll[18] = 0x84, .pll[19] = 0x23,
};
-static const struct intel_c10pll_state * const mtl_c10_hdmi_tables[] = {
- &mtl_c10_hdmi_25_2, /* Consolidated Table */
- &mtl_c10_hdmi_27_0, /* Consolidated Table */
- &mtl_c10_hdmi_27027,
- &mtl_c10_hdmi_28320,
- &mtl_c10_hdmi_30240,
- &mtl_c10_hdmi_31500,
- &mtl_c10_hdmi_36000,
- &mtl_c10_hdmi_40000,
- &mtl_c10_hdmi_49500,
- &mtl_c10_hdmi_50000,
- &mtl_c10_hdmi_57284,
- &mtl_c10_hdmi_58000,
- &mtl_c10_hdmi_65000,
- &mtl_c10_hdmi_71000,
- &mtl_c10_hdmi_74176,
- &mtl_c10_hdmi_74_25, /* Consolidated Table */
- &mtl_c10_hdmi_75000,
- &mtl_c10_hdmi_78750,
- &mtl_c10_hdmi_85500,
- &mtl_c10_hdmi_88750,
- &mtl_c10_hdmi_106500,
- &mtl_c10_hdmi_108000,
- &mtl_c10_hdmi_115500,
- &mtl_c10_hdmi_119000,
- &mtl_c10_hdmi_135000,
- &mtl_c10_hdmi_138500,
- &mtl_c10_hdmi_147160,
- &mtl_c10_hdmi_148352,
- &mtl_c10_hdmi_148_5, /* Consolidated Table */
- &mtl_c10_hdmi_154000,
- &mtl_c10_hdmi_162000,
- &mtl_c10_hdmi_167000,
- &mtl_c10_hdmi_197802,
- &mtl_c10_hdmi_198000,
- &mtl_c10_hdmi_209800,
- &mtl_c10_hdmi_241500,
- &mtl_c10_hdmi_262750,
- &mtl_c10_hdmi_268500,
- &mtl_c10_hdmi_296703,
- &mtl_c10_hdmi_297000,
- &mtl_c10_hdmi_319750,
- &mtl_c10_hdmi_497750,
- &mtl_c10_hdmi_592000,
- &mtl_c10_hdmi_593407,
- &mtl_c10_hdmi_594, /* Consolidated Table */
- NULL,
-};
-
-static const struct intel_c20pll_state mtl_c20_hdmi_25_175 = {
- .clock = 25175,
- .tx = { 0xbe88, /* tx cfg0 */
- 0x9800, /* tx cfg1 */
- 0x0000, /* tx cfg2 */
- },
- .cmn = { 0x0500, /* cmn cfg0*/
- 0x0005, /* cmn cfg1 */
- 0x0000, /* cmn cfg2 */
- 0x0000, /* cmn cfg3 */
- },
- .mpllb = { 0xa0d2, /* mpllb cfg0 */
- 0x7d80, /* mpllb cfg1 */
- 0x0906, /* mpllb cfg2 */
- 0xbe40, /* mpllb cfg3 */
- 0x0000, /* mpllb cfg4 */
- 0x0000, /* mpllb cfg5 */
- 0x0200, /* mpllb cfg6 */
- 0x0001, /* mpllb cfg7 */
- 0x0000, /* mpllb cfg8 */
- 0x0000, /* mpllb cfg9 */
- 0x0001, /* mpllb cfg10 */
- },
+static const struct intel_cx0pll_params mtl_c10_hdmi_tables[] = {
+ C10PLL_HDMI_PARAMS(25200, mtl_c10_hdmi_25_2), /* Consolidated Table */
+ C10PLL_HDMI_PARAMS(27000, mtl_c10_hdmi_27_0), /* Consolidated Table */
+ C10PLL_HDMI_PARAMS(27027, mtl_c10_hdmi_27027),
+ C10PLL_HDMI_PARAMS(28320, mtl_c10_hdmi_28320),
+ C10PLL_HDMI_PARAMS(30240, mtl_c10_hdmi_30240),
+ C10PLL_HDMI_PARAMS(31500, mtl_c10_hdmi_31500),
+ C10PLL_HDMI_PARAMS(36000, mtl_c10_hdmi_36000),
+ C10PLL_HDMI_PARAMS(40000, mtl_c10_hdmi_40000),
+ C10PLL_HDMI_PARAMS(49500, mtl_c10_hdmi_49500),
+ C10PLL_HDMI_PARAMS(50000, mtl_c10_hdmi_50000),
+ C10PLL_HDMI_PARAMS(57284, mtl_c10_hdmi_57284),
+ C10PLL_HDMI_PARAMS(58000, mtl_c10_hdmi_58000),
+ C10PLL_HDMI_PARAMS(65000, mtl_c10_hdmi_65000),
+ C10PLL_HDMI_PARAMS(71000, mtl_c10_hdmi_71000),
+ C10PLL_HDMI_PARAMS(74176, mtl_c10_hdmi_74176),
+ C10PLL_HDMI_PARAMS(74250, mtl_c10_hdmi_74_25), /* Consolidated Table */
+ C10PLL_HDMI_PARAMS(75000, mtl_c10_hdmi_75000),
+ C10PLL_HDMI_PARAMS(78750, mtl_c10_hdmi_78750),
+ C10PLL_HDMI_PARAMS(85500, mtl_c10_hdmi_85500),
+ C10PLL_HDMI_PARAMS(88750, mtl_c10_hdmi_88750),
+ C10PLL_HDMI_PARAMS(106500, mtl_c10_hdmi_106500),
+ C10PLL_HDMI_PARAMS(108000, mtl_c10_hdmi_108000),
+ C10PLL_HDMI_PARAMS(115500, mtl_c10_hdmi_115500),
+ C10PLL_HDMI_PARAMS(119000, mtl_c10_hdmi_119000),
+ C10PLL_HDMI_PARAMS(135000, mtl_c10_hdmi_135000),
+ C10PLL_HDMI_PARAMS(138500, mtl_c10_hdmi_138500),
+ C10PLL_HDMI_PARAMS(147160, mtl_c10_hdmi_147160),
+ C10PLL_HDMI_PARAMS(148352, mtl_c10_hdmi_148352),
+ C10PLL_HDMI_PARAMS(148500, mtl_c10_hdmi_148_5), /* Consolidated Table */
+ C10PLL_HDMI_PARAMS(154000, mtl_c10_hdmi_154000),
+ C10PLL_HDMI_PARAMS(162000, mtl_c10_hdmi_162000),
+ C10PLL_HDMI_PARAMS(167000, mtl_c10_hdmi_167000),
+ C10PLL_HDMI_PARAMS(197802, mtl_c10_hdmi_197802),
+ C10PLL_HDMI_PARAMS(198000, mtl_c10_hdmi_198000),
+ C10PLL_HDMI_PARAMS(209800, mtl_c10_hdmi_209800),
+ C10PLL_HDMI_PARAMS(241500, mtl_c10_hdmi_241500),
+ C10PLL_HDMI_PARAMS(262750, mtl_c10_hdmi_262750),
+ C10PLL_HDMI_PARAMS(268500, mtl_c10_hdmi_268500),
+ C10PLL_HDMI_PARAMS(296703, mtl_c10_hdmi_296703),
+ C10PLL_HDMI_PARAMS(297000, mtl_c10_hdmi_297000),
+ C10PLL_HDMI_PARAMS(319750, mtl_c10_hdmi_319750),
+ C10PLL_HDMI_PARAMS(497750, mtl_c10_hdmi_497750),
+ C10PLL_HDMI_PARAMS(592000, mtl_c10_hdmi_592000),
+ C10PLL_HDMI_PARAMS(593407, mtl_c10_hdmi_593407),
+ C10PLL_HDMI_PARAMS(594000, mtl_c10_hdmi_594), /* Consolidated Table */
+ {}
};
static const struct intel_c20pll_state mtl_c20_hdmi_27_0 = {
- .clock = 27000,
.tx = { 0xbe88, /* tx cfg0 */
0x9800, /* tx cfg1 */
0x0000, /* tx cfg2 */
@@ -1815,7 +1760,6 @@ static const struct intel_c20pll_state mtl_c20_hdmi_27_0 = {
};
static const struct intel_c20pll_state mtl_c20_hdmi_74_25 = {
- .clock = 74250,
.tx = { 0xbe88, /* tx cfg0 */
0x9800, /* tx cfg1 */
0x0000, /* tx cfg2 */
@@ -1840,7 +1784,6 @@ static const struct intel_c20pll_state mtl_c20_hdmi_74_25 = {
};
static const struct intel_c20pll_state mtl_c20_hdmi_148_5 = {
- .clock = 148500,
.tx = { 0xbe88, /* tx cfg0 */
0x9800, /* tx cfg1 */
0x0000, /* tx cfg2 */
@@ -1865,7 +1808,6 @@ static const struct intel_c20pll_state mtl_c20_hdmi_148_5 = {
};
static const struct intel_c20pll_state mtl_c20_hdmi_594 = {
- .clock = 594000,
.tx = { 0xbe88, /* tx cfg0 */
0x9800, /* tx cfg1 */
0x0000, /* tx cfg2 */
@@ -1890,7 +1832,6 @@ static const struct intel_c20pll_state mtl_c20_hdmi_594 = {
};
static const struct intel_c20pll_state mtl_c20_hdmi_300 = {
- .clock = 3000000,
.tx = { 0xbe98, /* tx cfg0 */
0x8800, /* tx cfg1 */
0x0000, /* tx cfg2 */
@@ -1915,7 +1856,6 @@ static const struct intel_c20pll_state mtl_c20_hdmi_300 = {
};
static const struct intel_c20pll_state mtl_c20_hdmi_600 = {
- .clock = 6000000,
.tx = { 0xbe98, /* tx cfg0 */
0x8800, /* tx cfg1 */
0x0000, /* tx cfg2 */
@@ -1940,7 +1880,6 @@ static const struct intel_c20pll_state mtl_c20_hdmi_600 = {
};
static const struct intel_c20pll_state mtl_c20_hdmi_800 = {
- .clock = 8000000,
.tx = { 0xbe98, /* tx cfg0 */
0x8800, /* tx cfg1 */
0x0000, /* tx cfg2 */
@@ -1965,7 +1904,6 @@ static const struct intel_c20pll_state mtl_c20_hdmi_800 = {
};
static const struct intel_c20pll_state mtl_c20_hdmi_1000 = {
- .clock = 10000000,
.tx = { 0xbe98, /* tx cfg0 */
0x8800, /* tx cfg1 */
0x0000, /* tx cfg2 */
@@ -1990,7 +1928,6 @@ static const struct intel_c20pll_state mtl_c20_hdmi_1000 = {
};
static const struct intel_c20pll_state mtl_c20_hdmi_1200 = {
- .clock = 12000000,
.tx = { 0xbe98, /* tx cfg0 */
0x8800, /* tx cfg1 */
0x0000, /* tx cfg2 */
@@ -2014,21 +1951,20 @@ static const struct intel_c20pll_state mtl_c20_hdmi_1200 = {
},
};
-static const struct intel_c20pll_state * const mtl_c20_hdmi_tables[] = {
- &mtl_c20_hdmi_25_175,
- &mtl_c20_hdmi_27_0,
- &mtl_c20_hdmi_74_25,
- &mtl_c20_hdmi_148_5,
- &mtl_c20_hdmi_594,
- &mtl_c20_hdmi_300,
- &mtl_c20_hdmi_600,
- &mtl_c20_hdmi_800,
- &mtl_c20_hdmi_1000,
- &mtl_c20_hdmi_1200,
- NULL,
+static const struct intel_cx0pll_params mtl_c20_hdmi_tables[] = {
+ C20PLL_HDMI_PARAMS(27000, mtl_c20_hdmi_27_0),
+ C20PLL_HDMI_PARAMS(74250, mtl_c20_hdmi_74_25),
+ C20PLL_HDMI_PARAMS(148500, mtl_c20_hdmi_148_5),
+ C20PLL_HDMI_PARAMS(594000, mtl_c20_hdmi_594),
+ C20PLL_HDMI_PARAMS(300000, mtl_c20_hdmi_300),
+ C20PLL_HDMI_PARAMS(600000, mtl_c20_hdmi_600),
+ C20PLL_HDMI_PARAMS(800000, mtl_c20_hdmi_800),
+ C20PLL_HDMI_PARAMS(1000000, mtl_c20_hdmi_1000),
+ C20PLL_HDMI_PARAMS(1200000, mtl_c20_hdmi_1200),
+ {}
};
-static const struct intel_c10pll_state * const *
+static const struct intel_cx0pll_params *
intel_c10pll_tables_get(const struct intel_crtc_state *crtc_state,
struct intel_encoder *encoder)
{
@@ -2103,21 +2039,99 @@ static bool cx0pll_state_is_dp(const struct intel_cx0pll_state *pll_state)
return c20pll_state_is_dp(&pll_state->c20);
}
+static int intel_c10pll_calc_port_clock(const struct intel_c10pll_state *pll_state)
+{
+ unsigned int frac_quot = 0, frac_rem = 0, frac_den = 1;
+ unsigned int multiplier, tx_clk_div, hdmi_div, refclk = 38400;
+ int tmpclk = 0;
+
+ if (pll_state->pll[0] & C10_PLL0_FRACEN) {
+ frac_quot = pll_state->pll[12] << 8 | pll_state->pll[11];
+ frac_rem = pll_state->pll[14] << 8 | pll_state->pll[13];
+ frac_den = pll_state->pll[10] << 8 | pll_state->pll[9];
+ }
+
+ multiplier = (REG_FIELD_GET8(C10_PLL3_MULTIPLIERH_MASK, pll_state->pll[3]) << 8 |
+ pll_state->pll[2]) / 2 + 16;
+
+ tx_clk_div = REG_FIELD_GET8(C10_PLL15_TXCLKDIV_MASK, pll_state->pll[15]);
+ hdmi_div = REG_FIELD_GET8(C10_PLL15_HDMIDIV_MASK, pll_state->pll[15]);
+
+ tmpclk = DIV_ROUND_CLOSEST_ULL(mul_u32_u32(refclk, (multiplier << 16) + frac_quot) +
+ DIV_ROUND_CLOSEST(refclk * frac_rem, frac_den),
+ 10 << (tx_clk_div + 16));
+ tmpclk *= (hdmi_div ? 2 : 1);
+
+ return tmpclk;
+}
+
+static bool intel_c20phy_use_mpllb(const struct intel_c20pll_state *state)
+{
+ return state->tx[0] & C20_PHY_USE_MPLLB;
+}
+
+static int intel_c20pll_calc_port_clock(const struct intel_c20pll_state *pll_state)
+{
+ unsigned int frac, frac_en, frac_quot, frac_rem, frac_den;
+ unsigned int multiplier, refclk = 38400;
+ unsigned int tx_clk_div;
+ unsigned int ref_clk_mpllb_div;
+ unsigned int fb_clk_div4_en;
+ unsigned int ref, vco;
+ unsigned int tx_rate_mult;
+ unsigned int tx_rate = REG_FIELD_GET(C20_PHY_TX_RATE, pll_state->tx[0]);
+
+ if (intel_c20phy_use_mpllb(pll_state)) {
+ tx_rate_mult = 1;
+ frac_en = REG_FIELD_GET(C20_MPLLB_FRACEN, pll_state->mpllb[6]);
+ frac_quot = pll_state->mpllb[8];
+ frac_rem = pll_state->mpllb[9];
+ frac_den = pll_state->mpllb[7];
+ multiplier = REG_FIELD_GET(C20_MULTIPLIER_MASK, pll_state->mpllb[0]);
+ tx_clk_div = REG_FIELD_GET(C20_MPLLB_TX_CLK_DIV_MASK, pll_state->mpllb[0]);
+ ref_clk_mpllb_div = REG_FIELD_GET(C20_REF_CLK_MPLLB_DIV_MASK, pll_state->mpllb[6]);
+ fb_clk_div4_en = 0;
+ } else {
+ tx_rate_mult = 2;
+ frac_en = REG_FIELD_GET(C20_MPLLA_FRACEN, pll_state->mplla[6]);
+ frac_quot = pll_state->mplla[8];
+ frac_rem = pll_state->mplla[9];
+ frac_den = pll_state->mplla[7];
+ multiplier = REG_FIELD_GET(C20_MULTIPLIER_MASK, pll_state->mplla[0]);
+ tx_clk_div = REG_FIELD_GET(C20_MPLLA_TX_CLK_DIV_MASK, pll_state->mplla[1]);
+ ref_clk_mpllb_div = REG_FIELD_GET(C20_REF_CLK_MPLLB_DIV_MASK, pll_state->mplla[6]);
+ fb_clk_div4_en = REG_FIELD_GET(C20_FB_CLK_DIV4_EN, pll_state->mplla[0]);
+ }
+
+ if (frac_en)
+ frac = frac_quot + DIV_ROUND_CLOSEST(frac_rem, frac_den);
+ else
+ frac = 0;
+
+ ref = DIV_ROUND_CLOSEST(refclk * (1 << (1 + fb_clk_div4_en)), 1 << ref_clk_mpllb_div);
+ vco = DIV_ROUND_CLOSEST_ULL(mul_u32_u32(ref, (multiplier << (17 - 2)) + frac) >> 17, 10);
+
+ return vco << tx_rate_mult >> tx_clk_div >> tx_rate;
+}
+
/*
* TODO: Convert the following to align with intel_c20pll_find_table() and
* intel_c20pll_calc_state_from_table().
*/
static int intel_c10pll_calc_state_from_table(struct intel_encoder *encoder,
- const struct intel_c10pll_state * const *tables,
+ const struct intel_cx0pll_params *tables,
bool is_dp, int port_clock, int lane_count,
struct intel_cx0pll_state *pll_state)
{
struct intel_display *display = to_intel_display(encoder);
int i;
- for (i = 0; tables[i]; i++) {
- if (port_clock == tables[i]->clock) {
- pll_state->c10 = *tables[i];
+ for (i = 0; tables[i].name; i++) {
+ int clock = intel_c10pll_calc_port_clock(tables[i].c10);
+
+ drm_WARN_ON(display->drm, !intel_dpll_clock_matches(clock, tables[i].clock_rate));
+ if (intel_dpll_clock_matches(port_clock, clock)) {
+ pll_state->c10 = *tables[i].c10;
intel_cx0pll_update_ssc(encoder, pll_state, is_dp);
intel_c10pll_update_pll(encoder, pll_state);
@@ -2139,7 +2153,7 @@ static int intel_c10pll_calc_state(const struct intel_crtc_state *crtc_state,
{
struct intel_display *display = to_intel_display(encoder);
bool is_dp = intel_crtc_has_dp_encoder(crtc_state);
- const struct intel_c10pll_state * const *tables;
+ const struct intel_cx0pll_params *tables;
int err;
tables = intel_c10pll_tables_get(crtc_state, encoder);
@@ -2166,33 +2180,6 @@ static int intel_c10pll_calc_state(const struct intel_crtc_state *crtc_state,
return 0;
}
-static int intel_c10pll_calc_port_clock(struct intel_encoder *encoder,
- const struct intel_c10pll_state *pll_state)
-{
- unsigned int frac_quot = 0, frac_rem = 0, frac_den = 1;
- unsigned int multiplier, tx_clk_div, hdmi_div, refclk = 38400;
- int tmpclk = 0;
-
- if (pll_state->pll[0] & C10_PLL0_FRACEN) {
- frac_quot = pll_state->pll[12] << 8 | pll_state->pll[11];
- frac_rem = pll_state->pll[14] << 8 | pll_state->pll[13];
- frac_den = pll_state->pll[10] << 8 | pll_state->pll[9];
- }
-
- multiplier = (REG_FIELD_GET8(C10_PLL3_MULTIPLIERH_MASK, pll_state->pll[3]) << 8 |
- pll_state->pll[2]) / 2 + 16;
-
- tx_clk_div = REG_FIELD_GET8(C10_PLL15_TXCLKDIV_MASK, pll_state->pll[15]);
- hdmi_div = REG_FIELD_GET8(C10_PLL15_HDMIDIV_MASK, pll_state->pll[15]);
-
- tmpclk = DIV_ROUND_CLOSEST_ULL(mul_u32_u32(refclk, (multiplier << 16) + frac_quot) +
- DIV_ROUND_CLOSEST(refclk * frac_rem, frac_den),
- 10 << (tx_clk_div + 16));
- tmpclk *= (hdmi_div ? 2 : 1);
-
- return tmpclk;
-}
-
static int readout_enabled_lane_count(struct intel_encoder *encoder)
{
struct intel_display *display = to_intel_display(encoder);
@@ -2275,8 +2262,6 @@ static void intel_c10pll_readout_hw_state(struct intel_encoder *encoder,
intel_cx0_phy_transaction_end(encoder, wakeref);
- pll_state->clock = intel_c10pll_calc_port_clock(encoder, pll_state);
-
cx0pll_state->ssc_enabled = readout_ssc_state(encoder, true);
if (cx0pll_state->ssc_enabled != intel_c10pll_ssc_enabled(pll_state))
@@ -2321,8 +2306,7 @@ static void intel_c10pll_dump_hw_state(struct drm_printer *p,
unsigned int multiplier, tx_clk_div;
fracen = hw_state->pll[0] & C10_PLL0_FRACEN;
- drm_printf(p, "c10pll_hw_state: clock: %d, fracen: %s, ",
- hw_state->clock, str_yes_no(fracen));
+ drm_printf(p, "c10pll_hw_state: fracen: %s, ", str_yes_no(fracen));
if (fracen) {
frac_quot = hw_state->pll[12] << 8 | hw_state->pll[11];
@@ -2364,9 +2348,8 @@ static bool is_arrowlake_s_by_host_bridge(void)
return pdev && IS_ARROWLAKE_S_BY_HOST_BRIDGE_ID(host_bridge_pci_dev_id);
}
-static u16 intel_c20_hdmi_tmds_tx_cgf_1(const struct intel_crtc_state *crtc_state)
+static u16 intel_c20_hdmi_tmds_tx_cgf_1(struct intel_display *display)
{
- struct intel_display *display = to_intel_display(crtc_state);
u16 tx_misc;
u16 tx_dcc_cal_dac_ctrl_range = 8;
u16 tx_term_ctrl = 2;
@@ -2388,7 +2371,8 @@ static u16 intel_c20_hdmi_tmds_tx_cgf_1(const struct intel_crtc_state *crtc_stat
C20_PHY_TX_DCC_BYPASS | C20_PHY_TX_TERM_CTL(tx_term_ctrl));
}
-static int intel_c20_compute_hdmi_tmds_pll(const struct intel_crtc_state *crtc_state,
+static int intel_c20_compute_hdmi_tmds_pll(struct intel_display *display,
+ int port_clock,
struct intel_c20pll_state *pll_state)
{
u64 datarate;
@@ -2402,10 +2386,10 @@ static int intel_c20_compute_hdmi_tmds_pll(const struct intel_crtc_state *crtc_s
u8 mpllb_ana_freq_vco;
u8 mpll_div_multiplier;
- if (crtc_state->port_clock < 25175 || crtc_state->port_clock > 600000)
+ if (port_clock < 25175 || port_clock > 600000)
return -EINVAL;
- datarate = ((u64)crtc_state->port_clock * 1000) * 10;
+ datarate = ((u64)port_clock * 1000) * 10;
mpll_tx_clk_div = ilog2(div64_u64((u64)CLOCK_9999MHZ, (u64)datarate));
vco_freq_shift = ilog2(div64_u64((u64)CLOCK_4999MHZ * (u64)256, (u64)datarate));
vco_freq = (datarate << vco_freq_shift) >> 8;
@@ -2427,9 +2411,8 @@ static int intel_c20_compute_hdmi_tmds_pll(const struct intel_crtc_state *crtc_s
else
mpllb_ana_freq_vco = MPLLB_ANA_FREQ_VCO_0;
- pll_state->clock = crtc_state->port_clock;
pll_state->tx[0] = 0xbe88;
- pll_state->tx[1] = intel_c20_hdmi_tmds_tx_cgf_1(crtc_state);
+ pll_state->tx[1] = intel_c20_hdmi_tmds_tx_cgf_1(display);
pll_state->tx[2] = 0x0000;
pll_state->cmn[0] = 0x0500;
pll_state->cmn[1] = 0x0005;
@@ -2457,7 +2440,7 @@ static int intel_c20_compute_hdmi_tmds_pll(const struct intel_crtc_state *crtc_s
return 0;
}
-static const struct intel_c20pll_state * const *
+static const struct intel_cx0pll_params *
intel_c20_pll_tables_get(const struct intel_crtc_state *crtc_state,
struct intel_encoder *encoder)
{
@@ -2625,20 +2608,25 @@ static void intel_c20_program_vdr_params(struct intel_encoder *encoder,
MB_WRITE_COMMITTED);
}
-static const struct intel_c20pll_state *
+static const struct intel_cx0pll_params *
intel_c20_pll_find_table(const struct intel_crtc_state *crtc_state,
struct intel_encoder *encoder)
{
- const struct intel_c20pll_state * const *tables;
+ struct intel_display *display = to_intel_display(crtc_state);
+ const struct intel_cx0pll_params *tables;
int i;
tables = intel_c20_pll_tables_get(crtc_state, encoder);
if (!tables)
return NULL;
- for (i = 0; tables[i]; i++)
- if (crtc_state->port_clock == tables[i]->clock)
- return tables[i];
+ for (i = 0; tables[i].name; i++) {
+ int clock = intel_c20pll_calc_port_clock(tables[i].c20);
+
+ drm_WARN_ON(display->drm, !intel_dpll_clock_matches(clock, tables[i].clock_rate));
+ if (intel_dpll_clock_matches(crtc_state->port_clock, clock))
+ return &tables[i];
+ }
return NULL;
}
@@ -2647,13 +2635,13 @@ static int intel_c20pll_calc_state_from_table(const struct intel_crtc_state *crt
struct intel_encoder *encoder,
struct intel_cx0pll_state *pll_state)
{
- const struct intel_c20pll_state *table;
+ const struct intel_cx0pll_params *table;
table = intel_c20_pll_find_table(crtc_state, encoder);
if (!table)
return -EINVAL;
- pll_state->c20 = *table;
+ pll_state->c20 = *table->c20;
intel_cx0pll_update_ssc(encoder, pll_state, intel_crtc_has_dp_encoder(crtc_state));
@@ -2681,7 +2669,8 @@ static int intel_c20pll_calc_state(const struct intel_crtc_state *crtc_state,
/* TODO: Update SSC state for HDMI as well */
if (!is_dp && err)
- err = intel_c20_compute_hdmi_tmds_pll(crtc_state, &hw_state->cx0pll.c20);
+ err = intel_c20_compute_hdmi_tmds_pll(display, crtc_state->port_clock,
+ &hw_state->cx0pll.c20);
if (err)
return err;
@@ -2705,56 +2694,6 @@ int intel_cx0pll_calc_state(const struct intel_crtc_state *crtc_state,
return intel_c20pll_calc_state(crtc_state, encoder, hw_state);
}
-static bool intel_c20phy_use_mpllb(const struct intel_c20pll_state *state)
-{
- return state->tx[0] & C20_PHY_USE_MPLLB;
-}
-
-static int intel_c20pll_calc_port_clock(struct intel_encoder *encoder,
- const struct intel_c20pll_state *pll_state)
-{
- unsigned int frac, frac_en, frac_quot, frac_rem, frac_den;
- unsigned int multiplier, refclk = 38400;
- unsigned int tx_clk_div;
- unsigned int ref_clk_mpllb_div;
- unsigned int fb_clk_div4_en;
- unsigned int ref, vco;
- unsigned int tx_rate_mult;
- unsigned int tx_rate = REG_FIELD_GET(C20_PHY_TX_RATE, pll_state->tx[0]);
-
- if (intel_c20phy_use_mpllb(pll_state)) {
- tx_rate_mult = 1;
- frac_en = REG_FIELD_GET(C20_MPLLB_FRACEN, pll_state->mpllb[6]);
- frac_quot = pll_state->mpllb[8];
- frac_rem = pll_state->mpllb[9];
- frac_den = pll_state->mpllb[7];
- multiplier = REG_FIELD_GET(C20_MULTIPLIER_MASK, pll_state->mpllb[0]);
- tx_clk_div = REG_FIELD_GET(C20_MPLLB_TX_CLK_DIV_MASK, pll_state->mpllb[0]);
- ref_clk_mpllb_div = REG_FIELD_GET(C20_REF_CLK_MPLLB_DIV_MASK, pll_state->mpllb[6]);
- fb_clk_div4_en = 0;
- } else {
- tx_rate_mult = 2;
- frac_en = REG_FIELD_GET(C20_MPLLA_FRACEN, pll_state->mplla[6]);
- frac_quot = pll_state->mplla[8];
- frac_rem = pll_state->mplla[9];
- frac_den = pll_state->mplla[7];
- multiplier = REG_FIELD_GET(C20_MULTIPLIER_MASK, pll_state->mplla[0]);
- tx_clk_div = REG_FIELD_GET(C20_MPLLA_TX_CLK_DIV_MASK, pll_state->mplla[1]);
- ref_clk_mpllb_div = REG_FIELD_GET(C20_REF_CLK_MPLLB_DIV_MASK, pll_state->mplla[6]);
- fb_clk_div4_en = REG_FIELD_GET(C20_FB_CLK_DIV4_EN, pll_state->mplla[0]);
- }
-
- if (frac_en)
- frac = frac_quot + DIV_ROUND_CLOSEST(frac_rem, frac_den);
- else
- frac = 0;
-
- ref = DIV_ROUND_CLOSEST(refclk * (1 << (1 + fb_clk_div4_en)), 1 << ref_clk_mpllb_div);
- vco = DIV_ROUND_CLOSEST_ULL(mul_u32_u32(ref, (multiplier << (17 - 2)) + frac) >> 17, 10);
-
- return vco << tx_rate_mult >> tx_clk_div >> tx_rate;
-}
-
static void intel_c20pll_readout_hw_state(struct intel_encoder *encoder,
struct intel_cx0pll_state *cx0pll_state)
{
@@ -2823,8 +2762,6 @@ static void intel_c20pll_readout_hw_state(struct intel_encoder *encoder,
}
}
- pll_state->clock = intel_c20pll_calc_port_clock(encoder, pll_state);
-
intel_cx0_phy_transaction_end(encoder, wakeref);
cx0pll_state->ssc_enabled = readout_ssc_state(encoder, intel_c20phy_use_mpllb(pll_state));
@@ -2835,7 +2772,7 @@ static void intel_c20pll_dump_hw_state(struct drm_printer *p,
{
int i;
- drm_printf(p, "c20pll_hw_state: clock: %d\n", hw_state->clock);
+ drm_printf(p, "c20pll_hw_state:\n");
drm_printf(p,
"tx[0] = 0x%.4x, tx[1] = 0x%.4x, tx[2] = 0x%.4x\n",
hw_state->tx[0], hw_state->tx[1], hw_state->tx[2]);
@@ -2971,6 +2908,12 @@ static void intel_c20_pll_program(struct intel_display *display,
MB_WRITE_COMMITTED);
}
+/*
+ * DP2.0 10G and 20G rates use MPLLA; everything else uses MPLLB
+ * (see the caller, intel_program_port_clock_ctl()).
+ */
+static bool is_mplla_clock_rate(int clock)
+{
+ return intel_dpll_clock_matches(clock, 1000000) ||
+ intel_dpll_clock_matches(clock, 2000000);
+}
+
static void intel_program_port_clock_ctl(struct intel_encoder *encoder,
const struct intel_cx0pll_state *pll_state,
int port_clock,
@@ -2996,7 +2939,7 @@ static void intel_program_port_clock_ctl(struct intel_encoder *encoder,
/* TODO: HDMI FRL */
/* DP2.0 10G and 20G rates enable MPLLA*/
- if (port_clock == 1000000 || port_clock == 2000000)
+ if (is_mplla_clock_rate(port_clock))
val |= pll_state->ssc_enabled ? XELPDP_SSC_ENABLE_PLLA : 0;
else
val |= pll_state->ssc_enabled ? XELPDP_SSC_ENABLE_PLLB : 0;
@@ -3223,7 +3166,6 @@ static u32 intel_cx0_get_pclk_pll_ack(u8 lane_mask)
static void intel_cx0pll_enable(struct intel_encoder *encoder,
const struct intel_cx0pll_state *pll_state)
{
- int port_clock = pll_state->use_c10 ? pll_state->c10.clock : pll_state->c20.clock;
struct intel_display *display = to_intel_display(encoder);
enum phy phy = intel_encoder_to_phy(encoder);
struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
@@ -3231,6 +3173,12 @@ static void intel_cx0pll_enable(struct intel_encoder *encoder,
u8 maxpclk_lane = lane_reversal ? INTEL_CX0_LANE1 :
INTEL_CX0_LANE0;
struct ref_tracker *wakeref = intel_cx0_phy_transaction_begin(encoder);
+ int port_clock;
+
+ if (pll_state->use_c10)
+ port_clock = intel_c10pll_calc_port_clock(&pll_state->c10);
+ else
+ port_clock = intel_c20pll_calc_port_clock(&pll_state->c20);
/*
* Lane reversal is never used in DP-alt mode, in that case the
@@ -3730,9 +3678,9 @@ int intel_cx0pll_calc_port_clock(struct intel_encoder *encoder,
const struct intel_cx0pll_state *pll_state)
{
if (intel_encoder_is_c10phy(encoder))
- return intel_c10pll_calc_port_clock(encoder, &pll_state->c10);
+ return intel_c10pll_calc_port_clock(&pll_state->c10);
- return intel_c20pll_calc_port_clock(encoder, &pll_state->c20);
+ return intel_c20pll_calc_port_clock(&pll_state->c20);
}
/*
@@ -3786,3 +3734,124 @@ void intel_cx0_pll_power_save_wa(struct intel_display *display)
intel_cx0pll_disable(encoder);
}
}
+
+/*
+ * Warn if the port clock computed from @pll_state diverges from
+ * @precomputed_clock; additionally dump the full PLL register state when
+ * KMS debugging is enabled.
+ */
+static void intel_c10pll_verify_clock(struct intel_display *display,
+ int precomputed_clock,
+ const char *pll_state_name,
+ const struct intel_c10pll_state *pll_state,
+ bool is_precomputed_state)
+{
+ struct drm_printer p;
+ int clock;
+
+ clock = intel_c10pll_calc_port_clock(pll_state);
+
+ if (intel_dpll_clock_matches(clock, precomputed_clock))
+ return;
+
+ drm_warn(display->drm,
+ "PLL state %s (%s): clock difference too high: computed %d, pre-computed %d\n",
+ pll_state_name,
+ is_precomputed_state ? "precomputed" : "computed",
+ clock, precomputed_clock);
+
+ if (!drm_debug_enabled(DRM_UT_KMS))
+ return;
+
+ p = drm_dbg_printer(display->drm, DRM_UT_KMS, NULL);
+
+ drm_printf(&p, "PLL state %s (%s):\n",
+ pll_state_name,
+ is_precomputed_state ? "precomputed" : "computed");
+ intel_c10pll_dump_hw_state(&p, pll_state);
+}
+
+/*
+ * Verify a C10 PLL table entry: check the precomputed register state
+ * against its declared clock rate, and for HDMI entries also check the
+ * state freshly computed by intel_snps_hdmi_pll_compute_c10pll().
+ */
+static void intel_c10pll_verify_params(struct intel_display *display,
+ const struct intel_cx0pll_params *pll_params)
+{
+ struct intel_c10pll_state pll_state;
+
+ intel_c10pll_verify_clock(display, pll_params->clock_rate, pll_params->name, pll_params->c10, true);
+
+ if (!pll_params->is_hdmi)
+ return;
+
+ intel_snps_hdmi_pll_compute_c10pll(&pll_state, pll_params->clock_rate);
+
+ intel_c10pll_verify_clock(display, pll_params->clock_rate, pll_params->name, &pll_state, false);
+}
+
+/*
+ * C20 counterpart of intel_c10pll_verify_clock(): warn if the port clock
+ * computed from @pll_state diverges from @precomputed_clock, dumping the
+ * PLL register state when KMS debugging is enabled.
+ */
+static void intel_c20pll_verify_clock(struct intel_display *display,
+ int precomputed_clock,
+ const char *pll_state_name,
+ const struct intel_c20pll_state *pll_state,
+ bool is_precomputed_state)
+{
+ struct drm_printer p;
+ int clock;
+
+ clock = intel_c20pll_calc_port_clock(pll_state);
+
+ if (intel_dpll_clock_matches(clock, precomputed_clock))
+ return;
+
+ drm_warn(display->drm,
+ "PLL state %s (%s): clock difference too high: computed %d, pre-computed %d\n",
+ pll_state_name,
+ is_precomputed_state ? "precomputed" : "computed",
+ clock, precomputed_clock);
+
+ if (!drm_debug_enabled(DRM_UT_KMS))
+ return;
+
+ p = drm_dbg_printer(display->drm, DRM_UT_KMS, NULL);
+
+ drm_printf(&p, "PLL state %s (%s):\n",
+ pll_state_name,
+ is_precomputed_state ? "precomputed" : "computed");
+ intel_c20pll_dump_hw_state(&p, pll_state);
+}
+
+/*
+ * Verify a C20 PLL table entry: check the precomputed register state
+ * against its declared clock rate, and for HDMI entries also check the
+ * state freshly computed by intel_c20_compute_hdmi_tmds_pll() (skipped
+ * silently if that computation rejects the rate).
+ */
+static void intel_c20pll_verify_params(struct intel_display *display,
+ const struct intel_cx0pll_params *pll_params)
+{
+ struct intel_c20pll_state pll_state;
+
+ intel_c20pll_verify_clock(display, pll_params->clock_rate, pll_params->name, pll_params->c20, true);
+
+ if (!pll_params->is_hdmi)
+ return;
+
+ if (intel_c20_compute_hdmi_tmds_pll(display, pll_params->clock_rate, &pll_state) != 0)
+ return;
+
+ intel_c20pll_verify_clock(display, pll_params->clock_rate, pll_params->name, &pll_state, false);
+}
+
+/*
+ * Verify every entry of a PLL parameter table; the table is terminated by
+ * an entry with a NULL name, and each entry is dispatched to the C10 or
+ * C20 verifier based on its is_c10 flag.
+ */
+static void intel_cx0pll_verify_tables(struct intel_display *display,
+ const struct intel_cx0pll_params *tables)
+{
+ int i;
+
+ for (i = 0; tables[i].name; i++) {
+ if (tables[i].is_c10)
+ intel_c10pll_verify_params(display, &tables[i]);
+ else
+ intel_c20pll_verify_params(display, &tables[i]);
+ }
+}
+
+/*
+ * Sanity-check all precomputed C10 and C20 PLL tables: the clock derived
+ * from each entry's register state must match its declared clock rate.
+ */
+void intel_cx0pll_verify_plls(struct intel_display *display)
+{
+ /* C10 */
+ intel_cx0pll_verify_tables(display, mtl_c10_edp_tables);
+ intel_cx0pll_verify_tables(display, mtl_c10_dp_tables);
+ intel_cx0pll_verify_tables(display, mtl_c10_hdmi_tables);
+
+ /* C20 */
+ intel_cx0pll_verify_tables(display, xe2hpd_c20_edp_tables);
+ intel_cx0pll_verify_tables(display, mtl_c20_dp_tables);
+ intel_cx0pll_verify_tables(display, xe2hpd_c20_dp_tables);
+ intel_cx0pll_verify_tables(display, xe3lpd_c20_dp_edp_tables);
+ intel_cx0pll_verify_tables(display, mtl_c20_hdmi_tables);
+}
diff --git a/drivers/gpu/drm/i915/display/intel_cx0_phy.h b/drivers/gpu/drm/i915/display/intel_cx0_phy.h
index ae98ac23ea22..1d4480b8bf39 100644
--- a/drivers/gpu/drm/i915/display/intel_cx0_phy.h
+++ b/drivers/gpu/drm/i915/display/intel_cx0_phy.h
@@ -25,8 +25,8 @@ struct intel_dpll_hw_state;
struct intel_encoder;
struct intel_hdmi;
-void intel_clear_response_ready_flag(struct intel_encoder *encoder,
- int lane);
+void intel_cx0_clear_response_ready_flag(struct intel_encoder *encoder,
+ int lane);
bool intel_encoder_is_c10phy(struct intel_encoder *encoder);
void intel_mtl_pll_enable(struct intel_encoder *encoder,
struct intel_dpll *pll,
@@ -77,6 +77,7 @@ bool intel_mtl_tbt_pll_readout_hw_state(struct intel_display *display,
struct intel_dpll_hw_state *hw_state);
int intel_mtl_tbt_calc_port_clock(struct intel_encoder *encoder);
+void intel_cx0pll_verify_plls(struct intel_display *display);
void intel_cx0_pll_power_save_wa(struct intel_display *display);
void intel_lnl_mac_transmit_lfps(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state);
diff --git a/drivers/gpu/drm/i915/display/intel_cx0_phy_regs.h b/drivers/gpu/drm/i915/display/intel_cx0_phy_regs.h
index 658890f73515..152a4e751bdc 100644
--- a/drivers/gpu/drm/i915/display/intel_cx0_phy_regs.h
+++ b/drivers/gpu/drm/i915/display/intel_cx0_phy_regs.h
@@ -78,10 +78,10 @@
#define XELPDP_PCLK_PLL_ENABLE_TIMEOUT_US 3200
#define XELPDP_PCLK_PLL_DISABLE_TIMEOUT_US 20
#define XELPDP_PORT_BUF_SOC_READY_TIMEOUT_US 100
-#define XELPDP_PORT_RESET_START_TIMEOUT_US 5
+#define XELPDP_PORT_RESET_START_TIMEOUT_US 10
#define XELPDP_PORT_POWERDOWN_UPDATE_TIMEOUT_MS 2
#define XELPDP_PORT_RESET_END_TIMEOUT_MS 15
-#define XELPDP_REFCLK_ENABLE_TIMEOUT_US 1
+#define XELPDP_REFCLK_ENABLE_TIMEOUT_US 10
#define _XELPDP_PORT_BUF_CTL1_LN0_A 0x64004
#define _XELPDP_PORT_BUF_CTL1_LN0_B 0x64104
diff --git a/drivers/gpu/drm/i915/display/intel_ddi.c b/drivers/gpu/drm/i915/display/intel_ddi.c
index cb91d07cdaa6..7f1576bfe4b0 100644
--- a/drivers/gpu/drm/i915/display/intel_ddi.c
+++ b/drivers/gpu/drm/i915/display/intel_ddi.c
@@ -34,7 +34,6 @@
#include <drm/drm_print.h>
#include <drm/drm_privacy_screen_consumer.h>
-#include "i915_reg.h"
#include "icl_dsi.h"
#include "intel_alpm.h"
#include "intel_audio.h"
@@ -53,6 +52,7 @@
#include "intel_display_regs.h"
#include "intel_display_types.h"
#include "intel_display_utils.h"
+#include "intel_display_wa.h"
#include "intel_dkl_phy.h"
#include "intel_dkl_phy_regs.h"
#include "intel_dp.h"
@@ -1402,8 +1402,7 @@ static void tgl_dkl_phy_set_signal_levels(struct intel_encoder *encoder,
int level;
/* Wa_16011342517:adl-p */
- if (display->platform.alderlake_p &&
- IS_DISPLAY_STEP(display, STEP_A0, STEP_D0)) {
+ if (intel_display_wa(display, INTEL_DISPLAY_WA_16011342517)) {
if ((intel_encoder_is_hdmi(encoder) &&
crtc_state->port_clock == 594000) ||
(intel_encoder_is_dp(encoder) &&
@@ -4247,13 +4246,15 @@ void intel_ddi_get_clock(struct intel_encoder *encoder,
static void xe3plpd_ddi_get_config(struct intel_encoder *encoder,
struct intel_crtc_state *crtc_state)
{
+ struct intel_display *display = to_intel_display(encoder);
+
intel_lt_phy_pll_readout_hw_state(encoder, crtc_state, &crtc_state->dpll_hw_state.ltpll);
if (crtc_state->dpll_hw_state.ltpll.tbt_mode)
crtc_state->port_clock = intel_mtl_tbt_calc_port_clock(encoder);
else
crtc_state->port_clock =
- intel_lt_phy_calc_port_clock(encoder, crtc_state);
+ intel_lt_phy_calc_port_clock(display, &crtc_state->dpll_hw_state.ltpll);
intel_ddi_get_config(encoder, crtc_state);
}
@@ -4586,8 +4587,10 @@ intel_ddi_port_sync_transcoders(const struct intel_crtc_state *ref_crtc_state,
/*
* We don't enable port sync on BDW due to missing w/as and
* due to not having adjusted the modeset sequence appropriately.
+ * From xe3lpd onwards this is defeatured, with reference to
+ * Wa_16024710867.
*/
- if (DISPLAY_VER(display) < 9)
+ if (!IS_DISPLAY_VER(display, 9, 20))
return 0;
if (!intel_crtc_has_type(ref_crtc_state, INTEL_OUTPUT_DP))
diff --git a/drivers/gpu/drm/i915/display/intel_de.h b/drivers/gpu/drm/i915/display/intel_de.h
index a7ce3b875e06..f30f3f8ebee1 100644
--- a/drivers/gpu/drm/i915/display/intel_de.h
+++ b/drivers/gpu/drm/i915/display/intel_de.h
@@ -6,6 +6,8 @@
#ifndef __INTEL_DE_H__
#define __INTEL_DE_H__
+#include <drm/drm_print.h>
+
#include "intel_display_core.h"
#include "intel_dmc_wl.h"
#include "intel_dsb.h"
@@ -34,15 +36,18 @@ intel_de_read(struct intel_display *display, i915_reg_t reg)
static inline u8
intel_de_read8(struct intel_display *display, i915_reg_t reg)
{
- u8 val;
-
- intel_dmc_wl_get(display, reg);
+ /* this is only used on VGA registers (possible on pre-g4x) */
+ drm_WARN_ON(display->drm, DISPLAY_VER(display) >= 5 || display->platform.g4x);
- val = intel_uncore_read8(__to_uncore(display), reg);
+ return intel_uncore_read8(__to_uncore(display), reg);
+}
- intel_dmc_wl_put(display, reg);
+static inline void
+intel_de_write8(struct intel_display *display, i915_reg_t reg, u8 val)
+{
+ drm_WARN_ON(display->drm, DISPLAY_VER(display) >= 5 || display->platform.g4x);
- return val;
+ intel_uncore_write8(__to_uncore(display), reg, val);
}
static inline u64
diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c
index 3b8ba8ab76a1..b18ce0c36a64 100644
--- a/drivers/gpu/drm/i915/display/intel_display.c
+++ b/drivers/gpu/drm/i915/display/intel_display.c
@@ -50,7 +50,6 @@
#include "g4x_hdmi.h"
#include "hsw_ips.h"
#include "i915_config.h"
-#include "i915_reg.h"
#include "i9xx_plane.h"
#include "i9xx_plane_regs.h"
#include "i9xx_wm.h"
@@ -86,7 +85,6 @@
#include "intel_dpll.h"
#include "intel_dpll_mgr.h"
#include "intel_dpt.h"
-#include "intel_dpt_common.h"
#include "intel_drrs.h"
#include "intel_dsb.h"
#include "intel_dsi.h"
@@ -455,7 +453,7 @@ void intel_enable_transcoder(const struct intel_crtc_state *new_crtc_state)
}
/* Wa_22012358565:adl-p */
- if (DISPLAY_VER(display) == 13)
+ if (intel_display_wa(display, INTEL_DISPLAY_WA_22012358565))
intel_de_rmw(display, PIPE_ARB_CTL(display, pipe),
0, PIPE_ARB_USE_PROG_SLOTS);
@@ -709,7 +707,7 @@ static void icl_set_pipe_chicken(const struct intel_crtc_state *crtc_state)
tmp |= UNDERRUN_RECOVERY_DISABLE_ADLP;
/* Wa_14010547955:dg2 */
- if (display->platform.dg2)
+ if (intel_display_wa(display, INTEL_DISPLAY_WA_14010547955))
tmp |= DG2_RENDER_CCSTAG_4_3_EN;
intel_de_write(display, PIPE_CHICKEN(pipe), tmp);
@@ -1008,6 +1006,28 @@ static bool intel_casf_disabling(const struct intel_crtc_state *old_crtc_state,
return is_disabling(hw.casf_params.casf_enable, old_crtc_state, new_crtc_state);
}
+/*
+ * LOBF needs enabling if it is turning on, or if it stays enabled while
+ * LRR or M/N are being updated, on a crtc that will be active.
+ */
+static bool intel_crtc_lobf_enabling(const struct intel_crtc_state *old_crtc_state,
+ const struct intel_crtc_state *new_crtc_state)
+{
+ if (!new_crtc_state->hw.active)
+ return false;
+
+ return is_enabling(has_lobf, old_crtc_state, new_crtc_state) ||
+ (new_crtc_state->has_lobf &&
+ (new_crtc_state->update_lrr || new_crtc_state->update_m_n));
+}
+
+/*
+ * LOBF needs disabling if it is turning off, or if it was enabled while
+ * LRR or M/N are being updated, on a crtc that was active.
+ */
+static bool intel_crtc_lobf_disabling(const struct intel_crtc_state *old_crtc_state,
+ const struct intel_crtc_state *new_crtc_state)
+{
+ if (!old_crtc_state->hw.active)
+ return false;
+
+ return is_disabling(has_lobf, old_crtc_state, new_crtc_state) ||
+ (old_crtc_state->has_lobf &&
+ (new_crtc_state->update_lrr || new_crtc_state->update_m_n));
+}
+
#undef is_disabling
#undef is_enabling
@@ -1050,12 +1070,13 @@ static void intel_post_plane_update(struct intel_atomic_state *state,
if (audio_enabling(old_crtc_state, new_crtc_state))
intel_encoders_audio_enable(state, crtc);
- if (intel_display_wa(display, 14011503117)) {
+ if (intel_display_wa(display, INTEL_DISPLAY_WA_14011503117)) {
if (old_crtc_state->pch_pfit.enabled != new_crtc_state->pch_pfit.enabled)
adl_scaler_ecc_unmask(new_crtc_state);
}
- intel_alpm_post_plane_update(state, crtc);
+ if (intel_crtc_lobf_enabling(old_crtc_state, new_crtc_state))
+ intel_alpm_lobf_enable(new_crtc_state);
intel_psr_post_plane_update(state, crtc);
}
@@ -1152,7 +1173,9 @@ static void intel_pre_plane_update(struct intel_atomic_state *state,
intel_atomic_get_new_crtc_state(state, crtc);
enum pipe pipe = crtc->pipe;
- intel_alpm_pre_plane_update(state, crtc);
+ if (intel_crtc_lobf_disabling(old_crtc_state, new_crtc_state))
+ intel_alpm_lobf_disable(new_crtc_state);
+
intel_psr_pre_plane_update(state, crtc);
if (intel_crtc_vrr_disabling(state, crtc)) {
@@ -1614,7 +1637,6 @@ static void hsw_configure_cpu_transcoder(const struct intel_crtc_state *crtc_sta
}
intel_set_transcoder_timings(crtc_state);
- intel_vrr_set_transcoder_timings(crtc_state);
if (cpu_transcoder != TRANSCODER_EDP)
intel_de_write(display, TRANS_MULT(display, cpu_transcoder),
@@ -4325,43 +4347,58 @@ static int intel_crtc_atomic_check(struct intel_atomic_state *state,
return 0;
}
-static int
-compute_sink_pipe_bpp(const struct drm_connector_state *conn_state,
- struct intel_crtc_state *crtc_state)
+static int bpc_to_bpp(int bpc)
{
- struct intel_display *display = to_intel_display(crtc_state);
- struct drm_connector *connector = conn_state->connector;
- const struct drm_display_info *info = &connector->display_info;
- int bpp;
-
- switch (conn_state->max_bpc) {
+ switch (bpc) {
case 6 ... 7:
- bpp = 6 * 3;
- break;
+ return 6 * 3;
case 8 ... 9:
- bpp = 8 * 3;
- break;
+ return 8 * 3;
case 10 ... 11:
- bpp = 10 * 3;
- break;
+ return 10 * 3;
case 12 ... 16:
- bpp = 12 * 3;
- break;
+ return 12 * 3;
default:
- MISSING_CASE(conn_state->max_bpc);
+ MISSING_CASE(bpc);
return -EINVAL;
}
+}
- if (bpp < crtc_state->pipe_bpp) {
+static int
+compute_sink_pipe_bpp(const struct drm_connector_state *conn_state,
+ struct intel_crtc_state *crtc_state)
+{
+ struct intel_display *display = to_intel_display(crtc_state);
+ struct drm_connector *connector = conn_state->connector;
+ const struct drm_display_info *info = &connector->display_info;
+ int edid_bpc = info->bpc ? : 8;
+ int target_pipe_bpp;
+ int max_edid_bpp;
+
+ max_edid_bpp = bpc_to_bpp(edid_bpc);
+ if (max_edid_bpp < 0)
+ return max_edid_bpp;
+
+ target_pipe_bpp = bpc_to_bpp(conn_state->max_bpc);
+ if (target_pipe_bpp < 0)
+ return target_pipe_bpp;
+
+ /*
+ * The maximum pipe BPP is the minimum of the max platform BPP and
+ * the max EDID BPP.
+ */
+ crtc_state->max_pipe_bpp = min(crtc_state->pipe_bpp, max_edid_bpp);
+
+ if (target_pipe_bpp < crtc_state->pipe_bpp) {
drm_dbg_kms(display->drm,
- "[CONNECTOR:%d:%s] Limiting display bpp to %d "
+ "[CONNECTOR:%d:%s] Limiting target display pipe bpp to %d "
"(EDID bpp %d, max requested bpp %d, max platform bpp %d)\n",
connector->base.id, connector->name,
- bpp, 3 * info->bpc,
+ target_pipe_bpp, 3 * info->bpc,
3 * conn_state->max_requested_bpc,
crtc_state->pipe_bpp);
- crtc_state->pipe_bpp = bpp;
+ crtc_state->pipe_bpp = target_pipe_bpp;
}
return 0;
@@ -5459,7 +5496,7 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config,
PIPE_CONF_CHECK_I(dsc.config.nsl_bpg_offset);
PIPE_CONF_CHECK_BOOL(dsc.compression_enable);
- PIPE_CONF_CHECK_I(dsc.num_streams);
+ PIPE_CONF_CHECK_I(dsc.slice_config.streams_per_pipe);
PIPE_CONF_CHECK_I(dsc.compressed_bpp_x16);
PIPE_CONF_CHECK_BOOL(splitter.enable);
@@ -7357,9 +7394,6 @@ static void intel_atomic_dsb_finish(struct intel_atomic_state *state,
intel_psr_trigger_frame_change_event(new_crtc_state->dsb_commit,
state, crtc);
- intel_psr_wait_for_idle_dsb(new_crtc_state->dsb_commit,
- new_crtc_state);
-
if (new_crtc_state->use_dsb)
intel_dsb_vblank_evade(state, new_crtc_state->dsb_commit);
@@ -7390,9 +7424,37 @@ static void intel_atomic_dsb_finish(struct intel_atomic_state *state,
new_crtc_state->dsb_color);
if (new_crtc_state->use_dsb && !intel_color_uses_chained_dsb(new_crtc_state)) {
- intel_dsb_wait_vblanks(new_crtc_state->dsb_commit, 1);
+ /*
+ * The DSB vblank wait may or may not be skipped. Skip it here in
+ * the PSR trans push case to ensure we don't wait for two vblanks.
+ */
+ if (!intel_psr_use_trans_push(new_crtc_state))
+ intel_dsb_wait_vblanks(new_crtc_state->dsb_commit, 1);
intel_vrr_send_push(new_crtc_state->dsb_commit, new_crtc_state);
+
+ /*
+ * Waiting for idle is needed for the corner case where the PSR HW
+ * is transitioning into DEEP_SLEEP/SRDENT_OFF when a new Frame
+ * Change event comes in. It is ok to do it here for both Frame
+ * Change mechanisms (trans push and register write).
+ */
+ intel_psr_wait_for_idle_dsb(new_crtc_state->dsb_commit,
+ new_crtc_state);
+
+ /*
+ * In case PSR uses trans push as a "frame change" event and VRR
+ * is not in use, we need to wait for a vblank, otherwise we may
+ * miss selective updates. The DSB skips all waits while PSR is
+ * active, and the push send check is skipped as well because the
+ * trans push send bit is not reset by the HW if VRR is not
+ * enabled -> we could start configuring a new selective update
+ * while the previous one is not yet complete.
+ */
+ if (intel_psr_use_trans_push(new_crtc_state))
+ intel_dsb_wait_vblanks(new_crtc_state->dsb_commit, 1);
+
intel_dsb_wait_for_delayed_vblank(state, new_crtc_state->dsb_commit);
intel_vrr_check_push_sent(new_crtc_state->dsb_commit,
new_crtc_state);
@@ -8001,6 +8063,25 @@ void intel_setup_outputs(struct intel_display *display)
drm_helper_move_panel_connectors_to_head(display->drm);
}
+/*
+ * Maximum uncompressed dotclock: the CDCLK-derived maximum, further capped
+ * on display version 30.02 and on version 30+ platforms.
+ */
+int intel_max_uncompressed_dotclock(struct intel_display *display)
+{
+ int max_dotclock = display->cdclk.max_dotclk_freq;
+ int limit = max_dotclock;
+
+ if (DISPLAY_VERx100(display) == 3002)
+ limit = 937500;
+ else if (DISPLAY_VER(display) >= 30)
+ limit = 1350000;
+ /*
+ * Note: the Bspec lists limits for other platforms as well, but
+ * those are intentionally not enforced here to avoid regressions,
+ * unless real issues are observed.
+ */
+
+ return min(max_dotclock, limit);
+}
+
static int max_dotclock(struct intel_display *display)
{
int max_dotclock = display->cdclk.max_dotclk_freq;
diff --git a/drivers/gpu/drm/i915/display/intel_display.h b/drivers/gpu/drm/i915/display/intel_display.h
index f8e6e4e82722..552a59d19e0f 100644
--- a/drivers/gpu/drm/i915/display/intel_display.h
+++ b/drivers/gpu/drm/i915/display/intel_display.h
@@ -135,32 +135,6 @@ enum tc_port {
I915_MAX_TC_PORTS
};
-enum aux_ch {
- AUX_CH_NONE = -1,
-
- AUX_CH_A,
- AUX_CH_B,
- AUX_CH_C,
- AUX_CH_D,
- AUX_CH_E, /* ICL+ */
- AUX_CH_F,
- AUX_CH_G,
- AUX_CH_H,
- AUX_CH_I,
-
- /* tgl+ */
- AUX_CH_USBC1 = AUX_CH_D,
- AUX_CH_USBC2,
- AUX_CH_USBC3,
- AUX_CH_USBC4,
- AUX_CH_USBC5,
- AUX_CH_USBC6,
-
- /* XE_LPD repositions D/E offsets and bitfields */
- AUX_CH_D_XELPD = AUX_CH_USBC5,
- AUX_CH_E_XELPD,
-};
-
enum phy {
PHY_NONE = -1,
@@ -488,6 +462,7 @@ void intel_cpu_transcoder_get_m2_n2(struct intel_crtc *crtc,
struct intel_link_m_n *m_n);
int intel_dotclock_calculate(int link_freq, const struct intel_link_m_n *m_n);
int intel_crtc_dotclock(const struct intel_crtc_state *pipe_config);
+int intel_max_uncompressed_dotclock(struct intel_display *display);
enum intel_display_power_domain intel_port_to_power_domain(struct intel_digital_port *dig_port);
enum intel_display_power_domain
intel_aux_power_domain(struct intel_digital_port *dig_port);
diff --git a/drivers/gpu/drm/i915/display/intel_display_debugfs.c b/drivers/gpu/drm/i915/display/intel_display_debugfs.c
index aba13e8a9051..2614c4863c87 100644
--- a/drivers/gpu/drm/i915/display/intel_display_debugfs.c
+++ b/drivers/gpu/drm/i915/display/intel_display_debugfs.c
@@ -13,9 +13,9 @@
#include <drm/drm_file.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_print.h>
+#include <drm/intel/intel_gmd_misc_regs.h>
#include "hsw_ips.h"
-#include "i915_reg.h"
#include "i9xx_wm_regs.h"
#include "intel_alpm.h"
#include "intel_bo.h"
diff --git a/drivers/gpu/drm/i915/display/intel_display_device.c b/drivers/gpu/drm/i915/display/intel_display_device.c
index c32d65727642..a8ef1e6193b8 100644
--- a/drivers/gpu/drm/i915/display/intel_display_device.c
+++ b/drivers/gpu/drm/i915/display/intel_display_device.c
@@ -10,7 +10,6 @@
#include <drm/drm_print.h>
#include <drm/intel/pciids.h>
-#include "i915_reg.h"
#include "intel_cx0_phy_regs.h"
#include "intel_de.h"
#include "intel_display.h"
@@ -20,6 +19,7 @@
#include "intel_display_reg_defs.h"
#include "intel_display_regs.h"
#include "intel_display_types.h"
+#include "intel_display_wa.h"
#include "intel_fbc.h"
#include "intel_step.h"
@@ -1540,9 +1540,9 @@ probe_gmdid_display(struct intel_display *display, struct intel_display_ip_ver *
return NULL;
}
- gmd_id.ver = REG_FIELD_GET(GMD_ID_ARCH_MASK, val);
- gmd_id.rel = REG_FIELD_GET(GMD_ID_RELEASE_MASK, val);
- gmd_id.step = REG_FIELD_GET(GMD_ID_STEP, val);
+ gmd_id.ver = REG_FIELD_GET(GMD_ID_DISPLAY_ARCH_MASK, val);
+ gmd_id.rel = REG_FIELD_GET(GMD_ID_DISPLAY_RELEASE_MASK, val);
+ gmd_id.step = REG_FIELD_GET(GMD_ID_DISPLAY_STEP, val);
for (i = 0; i < ARRAY_SIZE(gmdid_display_map); i++) {
if (gmd_id.ver == gmdid_display_map[i].ver &&
@@ -1774,7 +1774,7 @@ static void __intel_display_device_info_runtime_init(struct intel_display *displ
display_runtime->port_mask |= BIT(PORT_F);
/* Wa_14011765242: adl-s A0,A1 */
- if (display->platform.alderlake_s && IS_DISPLAY_STEP(display, STEP_A0, STEP_A2))
+ if (intel_display_wa(display, INTEL_DISPLAY_WA_14011765242))
for_each_pipe(display, pipe)
display_runtime->num_scalers[pipe] = 0;
else if (DISPLAY_VER(display) >= 11) {
diff --git a/drivers/gpu/drm/i915/display/intel_display_device.h b/drivers/gpu/drm/i915/display/intel_display_device.h
index 13f2a629981f..e84c190dcc4f 100644
--- a/drivers/gpu/drm/i915/display/intel_display_device.h
+++ b/drivers/gpu/drm/i915/display/intel_display_device.h
@@ -197,6 +197,7 @@ struct intel_display_platforms {
#define HAS_PSR(__display) (DISPLAY_INFO(__display)->has_psr)
#define HAS_PSR_HW_TRACKING(__display) (DISPLAY_INFO(__display)->has_psr_hw_tracking)
#define HAS_PSR2_SEL_FETCH(__display) (DISPLAY_VER(__display) >= 12)
+#define HAS_PSR_TRANS_PUSH_FRAME_CHANGE(__display) (DISPLAY_VER(__display) >= 20)
#define HAS_SAGV(__display) (DISPLAY_VER(__display) >= 9 && \
!(__display)->platform.broxton && !(__display)->platform.geminilake)
#define HAS_TRANSCODER(__display, trans) ((DISPLAY_RUNTIME_INFO(__display)->cpu_transcoder_mask & \
diff --git a/drivers/gpu/drm/i915/display/intel_display_driver.c b/drivers/gpu/drm/i915/display/intel_display_driver.c
index 268b1de45b81..23bfecc983e8 100644
--- a/drivers/gpu/drm/i915/display/intel_display_driver.c
+++ b/drivers/gpu/drm/i915/display/intel_display_driver.c
@@ -208,16 +208,12 @@ int intel_display_driver_probe_noirq(struct intel_display *display)
intel_bios_init(display);
- ret = intel_vga_register(display);
- if (ret)
- goto cleanup_bios;
-
intel_psr_dc5_dc6_wa_init(display);
/* FIXME: completely on the wrong abstraction layer */
ret = intel_power_domains_init(display);
if (ret < 0)
- goto cleanup_vga;
+ goto cleanup_bios;
intel_pmdemand_init_early(display);
@@ -229,7 +225,7 @@ int intel_display_driver_probe_noirq(struct intel_display *display)
display->hotplug.dp_wq = alloc_ordered_workqueue("intel-dp", 0);
if (!display->hotplug.dp_wq) {
ret = -ENOMEM;
- goto cleanup_vga_client_pw_domain_dmc;
+ goto cleanup_pw_domain_dmc;
}
display->wq.modeset = alloc_ordered_workqueue("i915_modeset", 0);
@@ -245,13 +241,13 @@ int intel_display_driver_probe_noirq(struct intel_display *display)
goto cleanup_wq_modeset;
}
- display->wq.cleanup = alloc_workqueue("i915_cleanup", WQ_HIGHPRI, 0);
+ display->wq.cleanup = alloc_workqueue("i915_cleanup", WQ_HIGHPRI | WQ_PERCPU, 0);
if (!display->wq.cleanup) {
ret = -ENOMEM;
goto cleanup_wq_flip;
}
- display->wq.unordered = alloc_workqueue("display_unordered", 0, 0);
+ display->wq.unordered = alloc_workqueue("display_unordered", WQ_PERCPU, 0);
if (!display->wq.unordered) {
ret = -ENOMEM;
goto cleanup_wq_cleanup;
@@ -301,11 +297,9 @@ cleanup_wq_modeset:
destroy_workqueue(display->wq.modeset);
cleanup_wq_dp:
destroy_workqueue(display->hotplug.dp_wq);
-cleanup_vga_client_pw_domain_dmc:
+cleanup_pw_domain_dmc:
intel_dmc_fini(display);
intel_power_domains_driver_remove(display);
-cleanup_vga:
- intel_vga_unregister(display);
cleanup_bios:
intel_bios_driver_remove(display);
@@ -554,6 +548,8 @@ void intel_display_driver_register(struct intel_display *display)
if (!HAS_DISPLAY(display))
return;
+ intel_vga_register(display);
+
/* Must be done after probing outputs */
intel_opregion_register(display);
intel_acpi_video_register(display);
@@ -646,8 +642,6 @@ void intel_display_driver_remove_nogem(struct intel_display *display)
intel_power_domains_driver_remove(display);
- intel_vga_unregister(display);
-
intel_bios_driver_remove(display);
}
@@ -675,6 +669,8 @@ void intel_display_driver_unregister(struct intel_display *display)
acpi_video_unregister();
intel_opregion_unregister(display);
+
+ intel_vga_unregister(display);
}
/*
diff --git a/drivers/gpu/drm/i915/display/intel_display_irq.c b/drivers/gpu/drm/i915/display/intel_display_irq.c
index 6e7e4654eb79..70c1bba7c0a8 100644
--- a/drivers/gpu/drm/i915/display/intel_display_irq.c
+++ b/drivers/gpu/drm/i915/display/intel_display_irq.c
@@ -5,8 +5,8 @@
#include <drm/drm_print.h>
#include <drm/drm_vblank.h>
+#include <drm/intel/intel_gmd_interrupt_regs.h>
-#include "i915_reg.h"
#include "icl_dsi_regs.h"
#include "intel_crtc.h"
#include "intel_de.h"
@@ -1619,7 +1619,7 @@ static void i915gm_irq_cstate_wa_enable(struct intel_display *display)
*/
if (display->irq.vblank_enabled++ == 0)
intel_de_write(display, SCPD0,
- _MASKED_BIT_ENABLE(CSTATE_RENDER_CLOCK_GATE_DISABLE));
+ REG_MASKED_FIELD_ENABLE(CSTATE_RENDER_CLOCK_GATE_DISABLE));
}
static void i915gm_irq_cstate_wa_disable(struct intel_display *display)
@@ -1628,7 +1628,7 @@ static void i915gm_irq_cstate_wa_disable(struct intel_display *display)
if (--display->irq.vblank_enabled == 0)
intel_de_write(display, SCPD0,
- _MASKED_BIT_DISABLE(CSTATE_RENDER_CLOCK_GATE_DISABLE));
+ REG_MASKED_FIELD_DISABLE(CSTATE_RENDER_CLOCK_GATE_DISABLE));
}
void i915gm_irq_cstate_wa(struct intel_display *display, bool enable)
@@ -2472,6 +2472,7 @@ void intel_display_irq_init(struct intel_display *display)
struct intel_display_irq_snapshot {
u32 derrmr;
+ u32 err_int;
};
struct intel_display_irq_snapshot *
@@ -2486,6 +2487,9 @@ intel_display_irq_snapshot_capture(struct intel_display *display)
if (DISPLAY_VER(display) >= 6 && DISPLAY_VER(display) < 20 && !HAS_GMCH(display))
snapshot->derrmr = intel_de_read(display, DERRMR);
+ if (DISPLAY_VER(display) == 7)
+ snapshot->err_int = intel_de_read(display, GEN7_ERR_INT);
+
return snapshot;
}
@@ -2496,4 +2500,5 @@ void intel_display_irq_snapshot_print(const struct intel_display_irq_snapshot *s
return;
drm_printf(p, "DERRMR: 0x%08x\n", snapshot->derrmr);
+ drm_printf(p, "ERR_INT: 0x%08x\n", snapshot->err_int);
}
diff --git a/drivers/gpu/drm/i915/display/intel_display_limits.h b/drivers/gpu/drm/i915/display/intel_display_limits.h
index cb3c9c665c44..453f7b720815 100644
--- a/drivers/gpu/drm/i915/display/intel_display_limits.h
+++ b/drivers/gpu/drm/i915/display/intel_display_limits.h
@@ -138,6 +138,32 @@ enum hpd_pin {
HPD_NUM_PINS
};
+enum aux_ch {
+ AUX_CH_NONE = -1,
+
+ AUX_CH_A,
+ AUX_CH_B,
+ AUX_CH_C,
+ AUX_CH_D,
+ AUX_CH_E, /* ICL+ */
+ AUX_CH_F,
+ AUX_CH_G,
+ AUX_CH_H,
+ AUX_CH_I,
+
+ /* tgl+ */
+ AUX_CH_USBC1 = AUX_CH_D,
+ AUX_CH_USBC2,
+ AUX_CH_USBC3,
+ AUX_CH_USBC4,
+ AUX_CH_USBC5,
+ AUX_CH_USBC6,
+
+ /* XE_LPD repositions D/E offsets and bitfields */
+ AUX_CH_D_XELPD = AUX_CH_USBC5,
+ AUX_CH_E_XELPD,
+};
+
enum intel_color_block {
INTEL_PLANE_CB_PRE_CSC_LUT,
INTEL_PLANE_CB_CSC,
diff --git a/drivers/gpu/drm/i915/display/intel_display_power.c b/drivers/gpu/drm/i915/display/intel_display_power.c
index d27397f43863..ec96b141c74c 100644
--- a/drivers/gpu/drm/i915/display/intel_display_power.c
+++ b/drivers/gpu/drm/i915/display/intel_display_power.c
@@ -7,8 +7,8 @@
#include <linux/string_helpers.h>
#include <drm/drm_print.h>
+#include <drm/intel/intel_pcode_regs.h>
-#include "i915_reg.h"
#include "intel_backlight_regs.h"
#include "intel_cdclk.h"
#include "intel_clock_gating.h"
@@ -21,12 +21,12 @@
#include "intel_display_rpm.h"
#include "intel_display_types.h"
#include "intel_display_utils.h"
+#include "intel_display_wa.h"
#include "intel_dmc.h"
#include "intel_dram.h"
#include "intel_mchbar_regs.h"
#include "intel_parent.h"
#include "intel_pch_refclk.h"
-#include "intel_pcode.h"
#include "intel_pmdemand.h"
#include "intel_pps_regs.h"
#include "intel_snps_phy.h"
@@ -646,7 +646,7 @@ queue_async_put_domains_work(struct i915_power_domains *power_domains,
power.domains);
drm_WARN_ON(display->drm, power_domains->async_put_wakeref);
power_domains->async_put_wakeref = wakeref;
- drm_WARN_ON(display->drm, !queue_delayed_work(system_unbound_wq,
+ drm_WARN_ON(display->drm, !queue_delayed_work(system_dfl_wq,
&power_domains->async_put_work,
msecs_to_jiffies(delay_ms)));
}
@@ -1260,7 +1260,7 @@ static u32 hsw_read_dcomp(struct intel_display *display)
static void hsw_write_dcomp(struct intel_display *display, u32 val)
{
if (display->platform.haswell) {
- if (intel_pcode_write(display->drm, GEN6_PCODE_WRITE_D_COMP, val))
+ if (intel_parent_pcode_write(display, GEN6_PCODE_WRITE_D_COMP, val))
drm_dbg_kms(display->drm, "Failed to write to D_COMP\n");
} else {
intel_de_write(display, D_COMP_BDW, val);
@@ -1622,8 +1622,7 @@ static void tgl_bw_buddy_init(struct intel_display *display)
if (display->platform.dgfx && !display->platform.dg1)
return;
- if (display->platform.alderlake_s ||
- (display->platform.rocketlake && IS_DISPLAY_STEP(display, STEP_A0, STEP_B0)))
+ if (intel_display_wa(display, INTEL_DISPLAY_WA_1409767108))
/* Wa_1409767108 */
table = wa_1409767108_buddy_page_masks;
else
@@ -1646,7 +1645,7 @@ static void tgl_bw_buddy_init(struct intel_display *display)
table[config].page_mask);
/* Wa_22010178259:tgl,dg1,rkl,adl-s */
- if (DISPLAY_VER(display) == 12)
+ if (intel_display_wa(display, INTEL_DISPLAY_WA_22010178259))
intel_de_rmw(display, BW_BUDDY_CTL(i),
BW_BUDDY_TLB_REQ_TIMER_MASK,
BW_BUDDY_TLB_REQ_TIMER(0x8));
@@ -1663,8 +1662,7 @@ static void icl_display_core_init(struct intel_display *display,
gen9_set_dc_state(display, DC_STATE_DISABLE);
/* Wa_14011294188:ehl,jsl,tgl,rkl,adl-s */
- if (INTEL_PCH_TYPE(display) >= PCH_TGP &&
- INTEL_PCH_TYPE(display) < PCH_DG1)
+ if (intel_display_wa(display, INTEL_DISPLAY_WA_14011294188))
intel_de_rmw(display, SOUTH_DSPCLK_GATE_D, 0,
PCH_DPMGUNIT_CLOCK_GATE_DISABLE);
@@ -1718,17 +1716,17 @@ static void icl_display_core_init(struct intel_display *display,
intel_dmc_load_program(display);
/* Wa_14011508470:tgl,dg1,rkl,adl-s,adl-p,dg2 */
- if (IS_DISPLAY_VERx100(display, 1200, 1300))
+ if (intel_display_wa(display, INTEL_DISPLAY_WA_14011508470))
intel_de_rmw(display, GEN11_CHICKEN_DCPR_2, 0,
DCPR_CLEAR_MEMSTAT_DIS | DCPR_SEND_RESP_IMM |
DCPR_MASK_LPMODE | DCPR_MASK_MAXLATENCY_MEMUP_CLR);
/* Wa_14011503030:xelpd */
- if (DISPLAY_VER(display) == 13)
+ if (intel_display_wa(display, INTEL_DISPLAY_WA_14011503030))
intel_de_write(display, XELPD_DISPLAY_ERR_FATAL_MASK, ~0);
/* Wa_15013987218 */
- if (DISPLAY_VER(display) == 20) {
+ if (intel_display_wa(display, INTEL_DISPLAY_WA_15013987218)) {
intel_de_rmw(display, SOUTH_DSPCLK_GATE_D,
0, PCH_GMBUSUNIT_CLOCK_GATE_DISABLE);
intel_de_rmw(display, SOUTH_DSPCLK_GATE_D,
@@ -2267,8 +2265,9 @@ void intel_display_power_suspend_late(struct intel_display *display, bool s2idle
}
/* Tweaked Wa_14010685332:cnp,icp,jsp,mcc,tgp,adp */
- if (INTEL_PCH_TYPE(display) >= PCH_CNP && INTEL_PCH_TYPE(display) < PCH_DG1)
- intel_de_rmw(display, SOUTH_CHICKEN1, SBCLK_RUN_REFCLK_DIS, SBCLK_RUN_REFCLK_DIS);
+ if (intel_display_wa(display, INTEL_DISPLAY_WA_14010685332))
+ intel_de_rmw(display, SOUTH_CHICKEN1,
+ SBCLK_RUN_REFCLK_DIS, SBCLK_RUN_REFCLK_DIS);
}
void intel_display_power_resume_early(struct intel_display *display)
@@ -2282,7 +2281,7 @@ void intel_display_power_resume_early(struct intel_display *display)
}
/* Tweaked Wa_14010685332:cnp,icp,jsp,mcc,tgp,adp */
- if (INTEL_PCH_TYPE(display) >= PCH_CNP && INTEL_PCH_TYPE(display) < PCH_DG1)
+ if (intel_display_wa(display, INTEL_DISPLAY_WA_14010685332))
intel_de_rmw(display, SOUTH_CHICKEN1, SBCLK_RUN_REFCLK_DIS, 0);
intel_power_domains_resume(display);
diff --git a/drivers/gpu/drm/i915/display/intel_display_power_map.c b/drivers/gpu/drm/i915/display/intel_display_power_map.c
index c559ff000e67..65204d68a759 100644
--- a/drivers/gpu/drm/i915/display/intel_display_power_map.c
+++ b/drivers/gpu/drm/i915/display/intel_display_power_map.c
@@ -112,7 +112,6 @@ static const struct i915_power_well_desc hsw_power_wells_main[] = {
.id = HSW_DISP_PW_GLOBAL),
),
.ops = &hsw_power_well_ops,
- .has_vga = true,
},
};
@@ -146,7 +145,6 @@ static const struct i915_power_well_desc bdw_power_wells_main[] = {
.id = HSW_DISP_PW_GLOBAL),
),
.ops = &hsw_power_well_ops,
- .has_vga = true,
.irq_pipe_mask = BIT(PIPE_B) | BIT(PIPE_C),
},
};
@@ -390,7 +388,6 @@ static const struct i915_power_well_desc skl_power_wells_main[] = {
.id = SKL_DISP_PW_2),
),
.ops = &hsw_power_well_ops,
- .has_vga = true,
.irq_pipe_mask = BIT(PIPE_B) | BIT(PIPE_C),
.has_fuses = true,
}, {
@@ -469,7 +466,6 @@ static const struct i915_power_well_desc bxt_power_wells_main[] = {
.id = SKL_DISP_PW_2),
),
.ops = &hsw_power_well_ops,
- .has_vga = true,
.irq_pipe_mask = BIT(PIPE_B) | BIT(PIPE_C),
.has_fuses = true,
}, {
@@ -572,7 +568,6 @@ static const struct i915_power_well_desc glk_power_wells_main[] = {
.id = SKL_DISP_PW_2),
),
.ops = &hsw_power_well_ops,
- .has_vga = true,
.irq_pipe_mask = BIT(PIPE_B) | BIT(PIPE_C),
.has_fuses = true,
}, {
@@ -748,7 +743,6 @@ static const struct i915_power_well_desc icl_power_wells_main[] = {
.id = ICL_DISP_PW_3),
),
.ops = &hsw_power_well_ops,
- .has_vga = true,
.irq_pipe_mask = BIT(PIPE_B),
.has_fuses = true,
}, {
@@ -914,7 +908,6 @@ static const struct i915_power_well_desc tgl_power_wells_main[] = {
.id = ICL_DISP_PW_3),
),
.ops = &hsw_power_well_ops,
- .has_vga = true,
.irq_pipe_mask = BIT(PIPE_B),
.has_fuses = true,
}, {
@@ -1071,7 +1064,6 @@ static const struct i915_power_well_desc rkl_power_wells_main[] = {
),
.ops = &hsw_power_well_ops,
.irq_pipe_mask = BIT(PIPE_B),
- .has_vga = true,
.has_fuses = true,
}, {
.instances = &I915_PW_INSTANCES(
@@ -1166,7 +1158,6 @@ static const struct i915_power_well_desc dg1_power_wells_main[] = {
),
.ops = &hsw_power_well_ops,
.irq_pipe_mask = BIT(PIPE_B),
- .has_vga = true,
.has_fuses = true,
}, {
.instances = &I915_PW_INSTANCES(
@@ -1325,7 +1316,6 @@ static const struct i915_power_well_desc xelpd_power_wells_main[] = {
.id = SKL_DISP_PW_2),
),
.ops = &hsw_power_well_ops,
- .has_vga = true,
.has_fuses = true,
}, {
.instances = &I915_PW_INSTANCES(
@@ -1482,7 +1472,6 @@ static const struct i915_power_well_desc xelpdp_power_wells_main[] = {
.id = SKL_DISP_PW_2),
),
.ops = &hsw_power_well_ops,
- .has_vga = true,
.has_fuses = true,
}, {
.instances = &I915_PW_INSTANCES(
@@ -1649,7 +1638,6 @@ static const struct i915_power_well_desc xe3lpd_power_wells_main[] = {
.id = SKL_DISP_PW_2),
),
.ops = &hsw_power_well_ops,
- .has_vga = true,
.has_fuses = true,
}, {
.instances = &I915_PW_INSTANCES(
@@ -1722,7 +1710,6 @@ static const struct i915_power_well_desc wcl_power_wells_main[] = {
.id = SKL_DISP_PW_2),
),
.ops = &hsw_power_well_ops,
- .has_vga = true,
.has_fuses = true,
}, {
.instances = &I915_PW_INSTANCES(
diff --git a/drivers/gpu/drm/i915/display/intel_display_power_well.c b/drivers/gpu/drm/i915/display/intel_display_power_well.c
index db185a859133..f855f0f88694 100644
--- a/drivers/gpu/drm/i915/display/intel_display_power_well.c
+++ b/drivers/gpu/drm/i915/display/intel_display_power_well.c
@@ -6,8 +6,8 @@
#include <linux/iopoll.h>
#include <drm/drm_print.h>
+#include <drm/intel/intel_pcode_regs.h>
-#include "i915_reg.h"
#include "intel_backlight_regs.h"
#include "intel_combo_phy.h"
#include "intel_combo_phy_regs.h"
@@ -18,6 +18,7 @@
#include "intel_display_regs.h"
#include "intel_display_rpm.h"
#include "intel_display_types.h"
+#include "intel_display_wa.h"
#include "intel_dkl_phy.h"
#include "intel_dkl_phy_regs.h"
#include "intel_dmc.h"
@@ -27,7 +28,6 @@
#include "intel_dpll.h"
#include "intel_hotplug.h"
#include "intel_parent.h"
-#include "intel_pcode.h"
#include "intel_pps.h"
#include "intel_psr.h"
#include "intel_tc.h"
@@ -195,6 +195,48 @@ int intel_power_well_refcount(struct i915_power_well *power_well)
return power_well->count;
}
+static u32 dss_pipe_gating_bits(u8 irq_pipe_mask)
+{
+ u32 bits = 0;
+
+ if (irq_pipe_mask & BIT(PIPE_A))
+ bits |= DSS_PIPE_A_GATING_DISABLED;
+ if (irq_pipe_mask & BIT(PIPE_B))
+ bits |= DSS_PIPE_B_GATING_DISABLED;
+ if (irq_pipe_mask & BIT(PIPE_C))
+ bits |= DSS_PIPE_C_GATING_DISABLED;
+ if (irq_pipe_mask & BIT(PIPE_D))
+ bits |= DSS_PIPE_D_GATING_DISABLED;
+
+ return bits;
+}
+
+static void dss_pipe_gating_enable_disable(struct intel_display *display,
+ u8 irq_pipe_mask,
+ bool disable)
+{
+ u32 bits = dss_pipe_gating_bits(irq_pipe_mask);
+ u32 clear, set;
+
+ if (!bits)
+ return;
+
+ /*
+ * Single intel_de_rmw() for both enable/disable:
+ * - disable == true, set bits (disable clock gating)
+ * - disable == false, clear bits (re-enable clock gating)
+ */
+ set = disable ? bits : 0;
+ clear = disable ? 0 : bits;
+
+ intel_de_rmw(display, CLKGATE_DIS_DSSDSC, clear, set);
+
+ drm_dbg_kms(display->drm,
+ "DSS clock gating %sd for pipe_mask=0x%x (CLKGATE_DIS_DSSDSC=0x%08x)\n",
+ str_enable_disable(!disable), irq_pipe_mask,
+ intel_de_read(display, CLKGATE_DIS_DSSDSC));
+}
+
/*
* Starting with Haswell, we have a "Power Down Well" that can be turned off
* when not needed anymore. We have 4 registers that can request the power well
@@ -202,20 +244,25 @@ int intel_power_well_refcount(struct i915_power_well *power_well)
* requesting it to be enabled.
*/
static void hsw_power_well_post_enable(struct intel_display *display,
- u8 irq_pipe_mask, bool has_vga)
+ u8 irq_pipe_mask)
{
- if (has_vga)
- intel_vga_reset_io_mem(display);
-
- if (irq_pipe_mask)
+ if (irq_pipe_mask) {
gen8_irq_power_well_post_enable(display, irq_pipe_mask);
+
+ if (intel_display_wa(display, INTEL_DISPLAY_WA_22021048059))
+ dss_pipe_gating_enable_disable(display, irq_pipe_mask, false);
+ }
}
static void hsw_power_well_pre_disable(struct intel_display *display,
u8 irq_pipe_mask)
{
- if (irq_pipe_mask)
+ if (irq_pipe_mask) {
+ if (intel_display_wa(display, INTEL_DISPLAY_WA_22021048059))
+ dss_pipe_gating_enable_disable(display, irq_pipe_mask, true);
+
gen8_irq_power_well_pre_disable(display, irq_pipe_mask);
+ }
}
#define ICL_AUX_PW_TO_PHY(pw_idx) \
@@ -418,8 +465,7 @@ static void hsw_power_well_enable(struct intel_display *display,
}
hsw_power_well_post_enable(display,
- power_well->desc->irq_pipe_mask,
- power_well->desc->has_vga);
+ power_well->desc->irq_pipe_mask);
}
static void hsw_power_well_disable(struct intel_display *display,
@@ -522,7 +568,7 @@ static void icl_tc_cold_exit(struct intel_display *display)
int ret, tries = 0;
while (1) {
- ret = intel_pcode_write(display->drm, ICL_PCODE_EXIT_TCCOLD, 0);
+ ret = intel_parent_pcode_write(display, ICL_PCODE_EXIT_TCCOLD, 0);
if (ret != -EAGAIN || ++tries == 3)
break;
msleep(1);
@@ -806,7 +852,7 @@ void gen9_set_dc_state(struct intel_display *display, u32 state)
power_domains->dc_state, val & mask);
enable_dc6 = state & DC_STATE_EN_UPTO_DC6;
- dc6_was_enabled = val & DC_STATE_EN_UPTO_DC6;
+ dc6_was_enabled = power_domains->dc_state & DC_STATE_EN_UPTO_DC6;
if (!dc6_was_enabled && enable_dc6)
intel_dmc_update_dc6_allowed_count(display, true);
@@ -1230,7 +1276,7 @@ static void vlv_init_display_clock_gating(struct intel_display *display)
* Disable trickle feed and enable pnd deadline calculation
*/
intel_de_write(display, MI_ARB_VLV,
- MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE);
+ MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE_VLV);
intel_de_write(display, CBR1_VLV, 0);
drm_WARN_ON(display->drm, DISPLAY_RUNTIME_INFO(display)->rawclk_freq == 0);
@@ -1795,7 +1841,7 @@ tgl_tc_cold_request(struct intel_display *display, bool block)
* Spec states that we should timeout the request after 200us
* but the function below will timeout after 500us
*/
- ret = intel_pcode_read(display->drm, TGL_PCODE_TCCOLD, &low_val, &high_val);
+ ret = intel_parent_pcode_read(display, TGL_PCODE_TCCOLD, &low_val, &high_val);
if (ret == 0) {
if (block &&
(low_val & TGL_PCODE_EXIT_TCCOLD_DATA_L_EXIT_FAILED))
diff --git a/drivers/gpu/drm/i915/display/intel_display_power_well.h b/drivers/gpu/drm/i915/display/intel_display_power_well.h
index ec8e508d0593..8f5524da2d06 100644
--- a/drivers/gpu/drm/i915/display/intel_display_power_well.h
+++ b/drivers/gpu/drm/i915/display/intel_display_power_well.h
@@ -103,8 +103,6 @@ struct i915_power_well_desc {
* the well enabled.
*/
u16 fixed_enable_delay:1;
- /* The pw is backing the VGA functionality */
- u16 has_vga:1;
u16 has_fuses:1;
/*
* The pw is for an ICL+ TypeC PHY port in
diff --git a/drivers/gpu/drm/i915/display/intel_display_regs.h b/drivers/gpu/drm/i915/display/intel_display_regs.h
index 9e0d853f4b61..4746e9ebd920 100644
--- a/drivers/gpu/drm/i915/display/intel_display_regs.h
+++ b/drivers/gpu/drm/i915/display/intel_display_regs.h
@@ -6,6 +6,9 @@
#include "intel_display_reg_defs.h"
+#define GU_CNTL_PROTECTED _MMIO(0x10100C)
+#define DEPRESENT REG_BIT(9)
+
#define _GEN7_PIPEA_DE_LOAD_SL 0x70068
#define _GEN7_PIPEB_DE_LOAD_SL 0x71068
#define GEN7_PIPE_DE_LOAD_SL(pipe) _MMIO_PIPE(pipe, _GEN7_PIPEA_DE_LOAD_SL, _GEN7_PIPEB_DE_LOAD_SL)
@@ -79,6 +82,29 @@
#define DERRMR_PIPEC_VBLANK (1 << 21)
#define DERRMR_PIPEC_HBLANK (1 << 22)
+#define GEN7_ERR_INT _MMIO(0x44040)
+#define ERR_INT_POISON (1 << 31)
+#define ERR_INT_INVALID_GTT_PTE (1 << 29)
+#define ERR_INT_INVALID_PTE_DATA (1 << 28)
+#define ERR_INT_SPRITE_C_FAULT (1 << 23)
+#define ERR_INT_PRIMARY_C_FAULT (1 << 22)
+#define ERR_INT_CURSOR_C_FAULT (1 << 21)
+#define ERR_INT_SPRITE_B_FAULT (1 << 20)
+#define ERR_INT_PRIMARY_B_FAULT (1 << 19)
+#define ERR_INT_CURSOR_B_FAULT (1 << 18)
+#define ERR_INT_SPRITE_A_FAULT (1 << 17)
+#define ERR_INT_PRIMARY_A_FAULT (1 << 16)
+#define ERR_INT_CURSOR_A_FAULT (1 << 15)
+#define ERR_INT_MMIO_UNCLAIMED (1 << 13)
+#define ERR_INT_PIPE_CRC_DONE_C (1 << 8)
+#define ERR_INT_FIFO_UNDERRUN_C (1 << 6)
+#define ERR_INT_PIPE_CRC_DONE_B (1 << 5)
+#define ERR_INT_FIFO_UNDERRUN_B (1 << 3)
+#define ERR_INT_PIPE_CRC_DONE_A (1 << 2)
+#define ERR_INT_PIPE_CRC_DONE(pipe) (1 << (2 + (pipe) * 3))
+#define ERR_INT_FIFO_UNDERRUN_A (1 << 0)
+#define ERR_INT_FIFO_UNDERRUN(pipe) (1 << ((pipe) * 3))
+
#define VLV_IRQ_REGS I915_IRQ_REGS(VLV_IMR, \
VLV_IER, \
VLV_IIR)
@@ -160,6 +186,47 @@
#define DPLL_FPA01_P1_POST_DIV_MASK_I830 0x001f0000
+#define DSPCLK_GATE_D _MMIO(0x6200)
+#define VLV_DSPCLK_GATE_D _MMIO(VLV_DISPLAY_BASE + 0x6200)
+# define DPUNIT_B_CLOCK_GATE_DISABLE (1 << 30) /* 965 */
+# define VSUNIT_CLOCK_GATE_DISABLE (1 << 29) /* 965 */
+# define VRHUNIT_CLOCK_GATE_DISABLE (1 << 28) /* 965 */
+# define VRDUNIT_CLOCK_GATE_DISABLE (1 << 27) /* 965 */
+# define AUDUNIT_CLOCK_GATE_DISABLE (1 << 26) /* 965 */
+# define DPUNIT_A_CLOCK_GATE_DISABLE (1 << 25) /* 965 */
+# define DPCUNIT_CLOCK_GATE_DISABLE (1 << 24) /* 965 */
+# define PNV_GMBUSUNIT_CLOCK_GATE_DISABLE (1 << 24) /* pnv */
+# define TVRUNIT_CLOCK_GATE_DISABLE (1 << 23) /* 915-945 */
+# define TVCUNIT_CLOCK_GATE_DISABLE (1 << 22) /* 915-945 */
+# define TVFUNIT_CLOCK_GATE_DISABLE (1 << 21) /* 915-945 */
+# define TVEUNIT_CLOCK_GATE_DISABLE (1 << 20) /* 915-945 */
+# define DVSUNIT_CLOCK_GATE_DISABLE (1 << 19) /* 915-945 */
+# define DSSUNIT_CLOCK_GATE_DISABLE (1 << 18) /* 915-945 */
+# define DDBUNIT_CLOCK_GATE_DISABLE (1 << 17) /* 915-945 */
+# define DPRUNIT_CLOCK_GATE_DISABLE (1 << 16) /* 915-945 */
+# define DPFUNIT_CLOCK_GATE_DISABLE (1 << 15) /* 915-945 */
+# define DPBMUNIT_CLOCK_GATE_DISABLE (1 << 14) /* 915-945 */
+# define DPLSUNIT_CLOCK_GATE_DISABLE (1 << 13) /* 915-945 */
+# define DPLUNIT_CLOCK_GATE_DISABLE (1 << 12) /* 915-945 */
+# define DPOUNIT_CLOCK_GATE_DISABLE (1 << 11)
+# define DPBUNIT_CLOCK_GATE_DISABLE (1 << 10)
+# define DCUNIT_CLOCK_GATE_DISABLE (1 << 9)
+# define DPUNIT_CLOCK_GATE_DISABLE (1 << 8)
+# define VRUNIT_CLOCK_GATE_DISABLE (1 << 7) /* 915+: reserved */
+# define OVHUNIT_CLOCK_GATE_DISABLE (1 << 6) /* 830-865 */
+# define DPIOUNIT_CLOCK_GATE_DISABLE (1 << 6) /* 915-945 */
+# define OVFUNIT_CLOCK_GATE_DISABLE (1 << 5)
+# define OVBUNIT_CLOCK_GATE_DISABLE (1 << 4)
+/*
+ * This bit must be set on the 830 to prevent hangs when turning off the
+ * overlay scaler.
+ */
+# define OVRUNIT_CLOCK_GATE_DISABLE (1 << 3)
+# define OVCUNIT_CLOCK_GATE_DISABLE (1 << 2)
+# define OVUUNIT_CLOCK_GATE_DISABLE (1 << 1)
+# define ZVUNIT_CLOCK_GATE_DISABLE (1 << 0) /* 830 */
+# define OVLUNIT_CLOCK_GATE_DISABLE (1 << 0) /* 845,865 */
+
/* Additional CHV pll/phy registers */
#define DPIO_PHY_STATUS _MMIO(VLV_DISPLAY_BASE + 0x6240)
#define DPLL_PORTD_READY_MASK (0xf)
@@ -281,6 +348,7 @@
#define FW_CSPWRDWNEN (1 << 15)
#define MI_ARB_VLV _MMIO(VLV_DISPLAY_BASE + 0x6504)
+#define MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE_VLV (1 << 2)
#define CZCLK_CDCLK_FREQ_RATIO _MMIO(VLV_DISPLAY_BASE + 0x6508)
#define CDCLK_FREQ_SHIFT 4
@@ -311,6 +379,46 @@
#define OGAMC1 _MMIO(0x30020)
#define OGAMC0 _MMIO(0x30024)
+#define GEN8_CHICKEN_DCPR_1 _MMIO(0x46430)
+#define _LATENCY_REPORTING_REMOVED_PIPE_D REG_BIT(31)
+#define SKL_SELECT_ALTERNATE_DC_EXIT REG_BIT(30)
+#define _LATENCY_REPORTING_REMOVED_PIPE_C REG_BIT(25)
+#define _LATENCY_REPORTING_REMOVED_PIPE_B REG_BIT(24)
+#define _LATENCY_REPORTING_REMOVED_PIPE_A REG_BIT(23)
+#define LATENCY_REPORTING_REMOVED(pipe) _PICK((pipe), \
+ _LATENCY_REPORTING_REMOVED_PIPE_A, \
+ _LATENCY_REPORTING_REMOVED_PIPE_B, \
+ _LATENCY_REPORTING_REMOVED_PIPE_C, \
+ _LATENCY_REPORTING_REMOVED_PIPE_D)
+#define ICL_DELAY_PMRSP REG_BIT(22)
+#define DISABLE_FLR_SRC REG_BIT(15)
+#define MASK_WAKEMEM REG_BIT(13)
+#define DDI_CLOCK_REG_ACCESS REG_BIT(7)
+
+#define CHICKEN_PAR1_1 _MMIO(0x42080)
+#define IGNORE_KVMR_PIPE_A REG_BIT(23)
+#define KBL_ARB_FILL_SPARE_22 REG_BIT(22)
+#define DIS_RAM_BYPASS_PSR2_MAN_TRACK REG_BIT(16)
+#define SKL_DE_COMPRESSED_HASH_MODE REG_BIT(15)
+#define HSW_MASK_VBL_TO_PIPE_IN_SRD REG_BIT(15) /* hsw/bdw */
+#define FORCE_ARB_IDLE_PLANES REG_BIT(14)
+#define SKL_EDP_PSR_FIX_RDWRAP REG_BIT(3)
+#define IGNORE_PSR2_HW_TRACKING REG_BIT(1)
+
+/*
+ * GEN9 clock gating regs
+ */
+#define GEN9_CLKGATE_DIS_0 _MMIO(0x46530)
+#define DARBF_GATING_DIS REG_BIT(27)
+#define DMG_GATING_DIS REG_BIT(21)
+#define MTL_PIPEDMC_GATING_DIS(pipe) REG_BIT(15 - (pipe))
+#define PWM2_GATING_DIS REG_BIT(14)
+#define PWM1_GATING_DIS REG_BIT(13)
+
+#define GEN9_CLKGATE_DIS_3 _MMIO(0x46538)
+#define TGL_VRH_GATING_DIS REG_BIT(31)
+#define DPT_GATING_DIS REG_BIT(22)
+
#define GEN9_CLKGATE_DIS_4 _MMIO(0x4653C)
#define BXT_GMBUS_GATING_DIS (1 << 14)
#define DG2_DPFC_GATING_DIS REG_BIT(31)
@@ -1003,6 +1111,15 @@
#define SWF3(dev_priv, i) _MMIO(DISPLAY_MMIO_BASE(dev_priv) + 0x72414 + (i) * 4)
#define SWF_ILK(i) _MMIO(0x4F000 + (i) * 4)
+#define DEISR _MMIO(0x44000)
+#define DEIMR _MMIO(0x44004)
+#define DEIIR _MMIO(0x44008)
+#define DEIER _MMIO(0x4400c)
+
+#define DE_IRQ_REGS I915_IRQ_REGS(DEIMR, \
+ DEIER, \
+ DEIIR)
+
#define DIGITAL_PORT_HOTPLUG_CNTRL _MMIO(0x44030)
#define DIGITAL_PORTA_HOTPLUG_ENABLE (1 << 4)
#define DIGITAL_PORTA_PULSE_DURATION_2ms (0 << 2) /* pre-HSW */
@@ -1333,6 +1450,44 @@
GEN8_DE_PORT_IER, \
GEN8_DE_PORT_IIR)
+/* interrupts */
+#define DE_MASTER_IRQ_CONTROL (1 << 31)
+#define DE_SPRITEB_FLIP_DONE (1 << 29)
+#define DE_SPRITEA_FLIP_DONE (1 << 28)
+#define DE_PLANEB_FLIP_DONE (1 << 27)
+#define DE_PLANEA_FLIP_DONE (1 << 26)
+#define DE_PLANE_FLIP_DONE(plane) (1 << (26 + (plane)))
+#define DE_PCU_EVENT (1 << 25)
+#define DE_GTT_FAULT (1 << 24)
+#define DE_POISON (1 << 23)
+#define DE_PERFORM_COUNTER (1 << 22)
+#define DE_PCH_EVENT (1 << 21)
+#define DE_AUX_CHANNEL_A (1 << 20)
+#define DE_DP_A_HOTPLUG (1 << 19)
+#define DE_GSE (1 << 18)
+#define DE_PIPEB_VBLANK (1 << 15)
+#define DE_PIPEB_EVEN_FIELD (1 << 14)
+#define DE_PIPEB_ODD_FIELD (1 << 13)
+#define DE_PIPEB_LINE_COMPARE (1 << 12)
+#define DE_PIPEB_VSYNC (1 << 11)
+#define DE_PIPEB_CRC_DONE (1 << 10)
+#define DE_PIPEB_FIFO_UNDERRUN (1 << 8)
+#define DE_PIPEA_VBLANK (1 << 7)
+#define DE_PIPE_VBLANK(pipe) (1 << (7 + 8 * (pipe)))
+#define DE_PIPEA_EVEN_FIELD (1 << 6)
+#define DE_PIPEA_ODD_FIELD (1 << 5)
+#define DE_PIPEA_LINE_COMPARE (1 << 4)
+#define DE_PIPEA_VSYNC (1 << 3)
+#define DE_PIPEA_CRC_DONE (1 << 2)
+#define DE_PIPE_CRC_DONE(pipe) (1 << (2 + 8 * (pipe)))
+#define DE_PIPEA_FIFO_UNDERRUN (1 << 0)
+#define DE_PIPE_FIFO_UNDERRUN(pipe) (1 << (8 * (pipe)))
+
+/* Display Internal Timeout Register */
+#define RM_TIMEOUT _MMIO(0x42060)
+#define RM_TIMEOUT_REG_CAPTURE _MMIO(0x420E0)
+#define MMIO_TIMEOUT_US(us) ((us) << 0)
+
#define GEN8_DE_MISC_ISR _MMIO(0x44460)
#define GEN8_DE_MISC_IMR _MMIO(0x44464)
#define GEN8_DE_MISC_IIR _MMIO(0x44468)
@@ -1466,6 +1621,29 @@
#define CHICKEN_FBC_STRIDE_MASK REG_GENMASK(12, 0)
#define CHICKEN_FBC_STRIDE(x) REG_FIELD_PREP(CHICKEN_FBC_STRIDE_MASK, (x))
+#define _CHICKEN_PIPESL_1_A 0x420b0
+#define _CHICKEN_PIPESL_1_B 0x420b4
+#define CHICKEN_PIPESL_1(pipe) _MMIO_PIPE(pipe, _CHICKEN_PIPESL_1_A, _CHICKEN_PIPESL_1_B)
+#define HSW_PRI_STRETCH_MAX_MASK REG_GENMASK(28, 27)
+#define HSW_PRI_STRETCH_MAX_X8 REG_FIELD_PREP(HSW_PRI_STRETCH_MAX_MASK, 0)
+#define HSW_PRI_STRETCH_MAX_X4 REG_FIELD_PREP(HSW_PRI_STRETCH_MAX_MASK, 1)
+#define HSW_PRI_STRETCH_MAX_X2 REG_FIELD_PREP(HSW_PRI_STRETCH_MAX_MASK, 2)
+#define HSW_PRI_STRETCH_MAX_X1 REG_FIELD_PREP(HSW_PRI_STRETCH_MAX_MASK, 3)
+#define HSW_SPR_STRETCH_MAX_MASK REG_GENMASK(26, 25)
+#define HSW_SPR_STRETCH_MAX_X8 REG_FIELD_PREP(HSW_SPR_STRETCH_MAX_MASK, 0)
+#define HSW_SPR_STRETCH_MAX_X4 REG_FIELD_PREP(HSW_SPR_STRETCH_MAX_MASK, 1)
+#define HSW_SPR_STRETCH_MAX_X2 REG_FIELD_PREP(HSW_SPR_STRETCH_MAX_MASK, 2)
+#define HSW_SPR_STRETCH_MAX_X1 REG_FIELD_PREP(HSW_SPR_STRETCH_MAX_MASK, 3)
+#define HSW_FBCQ_DIS REG_BIT(22)
+#define HSW_UNMASK_VBL_TO_REGS_IN_SRD REG_BIT(15) /* hsw */
+#define SKL_PSR_MASK_PLANE_FLIP REG_BIT(11) /* skl+ */
+#define SKL_PLANE1_STRETCH_MAX_MASK REG_GENMASK(1, 0)
+#define SKL_PLANE1_STRETCH_MAX_X8 REG_FIELD_PREP(SKL_PLANE1_STRETCH_MAX_MASK, 0)
+#define SKL_PLANE1_STRETCH_MAX_X4 REG_FIELD_PREP(SKL_PLANE1_STRETCH_MAX_MASK, 1)
+#define SKL_PLANE1_STRETCH_MAX_X2 REG_FIELD_PREP(SKL_PLANE1_STRETCH_MAX_MASK, 2)
+#define SKL_PLANE1_STRETCH_MAX_X1 REG_FIELD_PREP(SKL_PLANE1_STRETCH_MAX_MASK, 3)
+#define BDW_UNMASK_VBL_TO_REGS_IN_SRD REG_BIT(0) /* bdw */
+
#define _CHICKEN_TRANS_A 0x420c0
#define _CHICKEN_TRANS_B 0x420c4
#define _CHICKEN_TRANS_C 0x420c8
@@ -1552,6 +1730,11 @@
#define GLK_DFSM_DISPLAY_DSC_DISABLE (1 << 7)
#define XE2LPD_DFSM_DBUF_OVERLAP_DISABLE (1 << 3)
+#define GMD_ID_DISPLAY _MMIO(0x510a0)
+#define GMD_ID_DISPLAY_ARCH_MASK REG_GENMASK(31, 22)
+#define GMD_ID_DISPLAY_RELEASE_MASK REG_GENMASK(21, 14)
+#define GMD_ID_DISPLAY_STEP REG_GENMASK(5, 0)
+
#define XE2LPD_DE_CAP _MMIO(0x41100)
#define XE2LPD_DE_CAP_3DLUT_MASK REG_GENMASK(31, 30)
#define XE2LPD_DE_CAP_DSC_MASK REG_GENMASK(29, 28)
@@ -1685,6 +1868,13 @@
SDE_TC_HOTPLUG_ICP(HPD_PORT_TC2) | \
SDE_TC_HOTPLUG_ICP(HPD_PORT_TC1))
+/* PCH */
+
+#define SDEISR _MMIO(0xc4000)
+#define SDEIMR _MMIO(0xc4004)
+#define SDEIIR _MMIO(0xc4008)
+#define SDEIER _MMIO(0xc400c)
+
#define SDE_IRQ_REGS I915_IRQ_REGS(SDEIMR, \
SDEIER, \
SDEIIR)
@@ -2021,6 +2211,28 @@
#define TRANS_BPC_6 REG_FIELD_PREP(TRANS_BPC_MASK, 2)
#define TRANS_BPC_12 REG_FIELD_PREP(TRANS_BPC_MASK, 3)
+/* Icelake PPS_DATA and _ECC DIP Registers.
+ * These are available for transcoders B,C and eDP.
+ * Adding the _A so as to reuse the _MMIO_TRANS2
+ * definition, with which it offsets to the right location.
+ */
+
+#define _TRANSA_CHICKEN1 0xf0060
+#define _TRANSB_CHICKEN1 0xf1060
+#define TRANS_CHICKEN1(pipe) _MMIO_PIPE(pipe, _TRANSA_CHICKEN1, _TRANSB_CHICKEN1)
+#define TRANS_CHICKEN1_HDMIUNIT_GC_DISABLE REG_BIT(10)
+#define TRANS_CHICKEN1_DP0UNIT_GC_DISABLE REG_BIT(4)
+
+#define _TRANSA_CHICKEN2 0xf0064
+#define _TRANSB_CHICKEN2 0xf1064
+#define TRANS_CHICKEN2(pipe) _MMIO_PIPE(pipe, _TRANSA_CHICKEN2, _TRANSB_CHICKEN2)
+#define TRANS_CHICKEN2_TIMING_OVERRIDE REG_BIT(31)
+#define TRANS_CHICKEN2_FDI_POLARITY_REVERSED REG_BIT(29)
+#define TRANS_CHICKEN2_FRAME_START_DELAY_MASK REG_GENMASK(28, 27)
+#define TRANS_CHICKEN2_FRAME_START_DELAY(x) REG_FIELD_PREP(TRANS_CHICKEN2_FRAME_START_DELAY_MASK, (x)) /* 0-3 */
+#define TRANS_CHICKEN2_DISABLE_DEEP_COLOR_COUNTER REG_BIT(26)
+#define TRANS_CHICKEN2_DISABLE_DEEP_COLOR_MODESWITCH REG_BIT(25)
+
#define PCH_DP_B _MMIO(0xe4100)
#define PCH_DP_C _MMIO(0xe4200)
#define PCH_DP_D _MMIO(0xe4300)
@@ -2211,6 +2423,13 @@
#define HSW_PWR_WELL_FORCE_ON (1 << 19)
#define HSW_PWR_WELL_CTL6 _MMIO(0x45414)
+/* clock gating DSS DSC disable register */
+#define CLKGATE_DIS_DSSDSC _MMIO(0x46548)
+#define DSS_PIPE_D_GATING_DISABLED REG_BIT(31)
+#define DSS_PIPE_C_GATING_DISABLED REG_BIT(29)
+#define DSS_PIPE_B_GATING_DISABLED REG_BIT(27)
+#define DSS_PIPE_A_GATING_DISABLED REG_BIT(25)
+
/* SKL Fuse Status */
enum skl_power_gate {
SKL_PG0,
@@ -2854,6 +3073,42 @@ enum skl_power_gate {
#define SFUSE_STRAP_DDIC_DETECTED (1 << 1)
#define SFUSE_STRAP_DDID_DETECTED (1 << 0)
+#define SOUTH_CHICKEN1 _MMIO(0xc2000)
+#define FDIA_PHASE_SYNC_SHIFT_OVR 19
+#define FDIA_PHASE_SYNC_SHIFT_EN 18
+#define INVERT_DDIE_HPD REG_BIT(28)
+#define INVERT_DDID_HPD_MTP REG_BIT(27)
+#define INVERT_TC4_HPD REG_BIT(26)
+#define INVERT_TC3_HPD REG_BIT(25)
+#define INVERT_TC2_HPD REG_BIT(24)
+#define INVERT_TC1_HPD REG_BIT(23)
+#define INVERT_DDID_HPD (1 << 18)
+#define INVERT_DDIC_HPD (1 << 17)
+#define INVERT_DDIB_HPD (1 << 16)
+#define INVERT_DDIA_HPD (1 << 15)
+#define FDI_PHASE_SYNC_OVR(pipe) (1 << (FDIA_PHASE_SYNC_SHIFT_OVR - ((pipe) * 2)))
+#define FDI_PHASE_SYNC_EN(pipe) (1 << (FDIA_PHASE_SYNC_SHIFT_EN - ((pipe) * 2)))
+#define FDI_BC_BIFURCATION_SELECT (1 << 12)
+#define CHASSIS_CLK_REQ_DURATION_MASK (0xf << 8)
+#define CHASSIS_CLK_REQ_DURATION(x) ((x) << 8)
+#define SBCLK_RUN_REFCLK_DIS (1 << 7)
+#define ICP_SECOND_PPS_IO_SELECT REG_BIT(2)
+#define SPT_PWM_GRANULARITY (1 << 0)
+#define SOUTH_CHICKEN2 _MMIO(0xc2004)
+#define FDI_MPHY_IOSFSB_RESET_STATUS (1 << 13)
+#define FDI_MPHY_IOSFSB_RESET_CTL (1 << 12)
+#define LPT_PWM_GRANULARITY (1 << 5)
+#define DPLS_EDP_PPS_FIX_DIS (1 << 0)
+
+#define SOUTH_DSPCLK_GATE_D _MMIO(0xc2020)
+#define PCH_GMBUSUNIT_CLOCK_GATE_DISABLE (1 << 31)
+#define PCH_DPLUNIT_CLOCK_GATE_DISABLE (1 << 30)
+#define PCH_DPLSUNIT_CLOCK_GATE_DISABLE (1 << 29)
+#define PCH_DPMGUNIT_CLOCK_GATE_DISABLE (1 << 15)
+#define PCH_CPUNIT_CLOCK_GATE_DISABLE (1 << 14)
+#define CNP_PWM_CGE_GATING_DISABLE (1 << 13)
+#define PCH_LP_PARTITION_LEVEL_DISABLE (1 << 12)
+
/* Gen4+ Timestamp and Pipe Frame time stamp registers */
#define GEN4_TIMESTAMP _MMIO(0x2358)
#define ILK_TIMESTAMP_HI _MMIO(0x70070)
@@ -2940,6 +3195,12 @@ enum skl_power_gate {
#define MTL_PIPE_CLKGATE_DIS2(pipe) _MMIO_PIPE(pipe, _MTL_PIPE_CLKGATE_DIS2_A, _MTL_PIPE_CLKGATE_DIS2_B)
#define MTL_DPFC_GATING_DIS REG_BIT(6)
+#define MTL_MEM_SS_INFO_GLOBAL _MMIO(0x45700)
+#define XE3P_ECC_IMPACTING_DE REG_BIT(12)
+#define MTL_N_OF_ENABLED_QGV_POINTS_MASK REG_GENMASK(11, 8)
+#define MTL_N_OF_POPULATED_CH_MASK REG_GENMASK(7, 4)
+#define MTL_DDR_TYPE_MASK REG_GENMASK(3, 0)
+
#define MTL_MEM_SS_INFO_QGV_POINT_OFFSET 0x45710
#define MTL_MEM_SS_INFO_QGV_POINT_LOW(point) _MMIO(MTL_MEM_SS_INFO_QGV_POINT_OFFSET + (point) * 8)
#define MTL_TRCD_MASK REG_GENMASK(31, 24)
@@ -2950,6 +3211,11 @@ enum skl_power_gate {
#define MTL_TRAS_MASK REG_GENMASK(16, 8)
#define MTL_TRDPRE_MASK REG_GENMASK(7, 0)
-
+#define FW_BLC _MMIO(0x20d8)
+#define FW_BLC2 _MMIO(0x20dc)
+#define FW_BLC_SELF _MMIO(0x20e0) /* 915+ only */
+#define FW_BLC_SELF_EN_MASK REG_BIT(31)
+#define FW_BLC_SELF_FIFO_MASK REG_BIT(16) /* 945 only */
+#define FW_BLC_SELF_EN REG_BIT(15) /* 945 only */
#endif /* __INTEL_DISPLAY_REGS_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_display_rps.c b/drivers/gpu/drm/i915/display/intel_display_rps.c
index b58281edc563..2f8248458826 100644
--- a/drivers/gpu/drm/i915/display/intel_display_rps.c
+++ b/drivers/gpu/drm/i915/display/intel_display_rps.c
@@ -8,8 +8,8 @@
#include <drm/drm_crtc.h>
#include <drm/drm_vblank.h>
-#include "i915_reg.h"
#include "intel_display_core.h"
+#include "intel_display_regs.h"
#include "intel_display_irq.h"
#include "intel_display_rps.h"
#include "intel_display_types.h"
diff --git a/drivers/gpu/drm/i915/display/intel_display_snapshot.c b/drivers/gpu/drm/i915/display/intel_display_snapshot.c
index f650f15ad394..7f423182aa29 100644
--- a/drivers/gpu/drm/i915/display/intel_display_snapshot.c
+++ b/drivers/gpu/drm/i915/display/intel_display_snapshot.c
@@ -19,7 +19,6 @@ struct intel_display_snapshot {
struct intel_display_device_info info;
struct intel_display_runtime_info runtime_info;
struct intel_display_params params;
- struct intel_overlay_snapshot *overlay;
struct intel_dmc_snapshot *dmc;
struct intel_display_irq_snapshot *irq;
};
@@ -41,7 +40,6 @@ struct intel_display_snapshot *intel_display_snapshot_capture(struct intel_displ
intel_display_params_copy(&snapshot->params);
snapshot->irq = intel_display_irq_snapshot_capture(display);
- snapshot->overlay = intel_overlay_snapshot_capture(display);
snapshot->dmc = intel_dmc_snapshot_capture(display);
return snapshot;
@@ -61,7 +59,6 @@ void intel_display_snapshot_print(const struct intel_display_snapshot *snapshot,
intel_display_params_dump(&snapshot->params, display->drm->driver->name, p);
intel_display_irq_snapshot_print(snapshot->irq, p);
- intel_overlay_snapshot_print(snapshot->overlay, p);
intel_dmc_snapshot_print(snapshot->dmc, p);
}
@@ -73,7 +70,6 @@ void intel_display_snapshot_free(struct intel_display_snapshot *snapshot)
intel_display_params_free(&snapshot->params);
kfree(snapshot->irq);
- kfree(snapshot->overlay);
kfree(snapshot->dmc);
kfree(snapshot);
}
diff --git a/drivers/gpu/drm/i915/display/intel_display_types.h b/drivers/gpu/drm/i915/display/intel_display_types.h
index 6b92f333e18b..e189f8c39ccb 100644
--- a/drivers/gpu/drm/i915/display/intel_display_types.h
+++ b/drivers/gpu/drm/i915/display/intel_display_types.h
@@ -145,7 +145,7 @@ struct intel_framebuffer {
struct intel_fb_view remapped_view;
};
- struct i915_address_space *dpt_vm;
+ struct intel_dpt *dpt;
unsigned int min_alignment;
unsigned int vtd_guard;
@@ -351,6 +351,7 @@ struct intel_vbt_panel_data {
bool low_vswing;
bool hobl;
bool dsc_disable;
+ bool pipe_joiner_enable;
} edp;
struct {
@@ -1162,6 +1163,7 @@ struct intel_crtc_state {
} dsi_pll;
int max_link_bpp_x16; /* in 1/16 bpp units */
+ int max_pipe_bpp; /* in 1 bpp units */
int pipe_bpp; /* in 1 bpp units */
int min_hblank;
struct intel_link_m_n dp_m_n;
@@ -1333,10 +1335,13 @@ struct intel_crtc_state {
/* Only used for state computation, not read out from the HW. */
bool compression_enabled_on_link;
bool compression_enable;
- int num_streams;
+ struct intel_dsc_slice_config {
+ int pipes_per_line;
+ int streams_per_pipe;
+ int slices_per_stream;
+ } slice_config;
/* Compressed Bpp in U6.4 format (first 4 bits for fractional part) */
u16 compressed_bpp_x16;
- u8 slice_count;
struct drm_dsc_config config;
} dsc;
@@ -1791,6 +1796,7 @@ struct intel_dp {
int link_rate;
u8 lane_count;
u8 sink_count;
+ bool downstream_port_changed;
bool needs_modeset_retry;
bool use_max_params;
u8 dpcd[DP_RECEIVER_CAP_SIZE];
diff --git a/drivers/gpu/drm/i915/display/intel_display_wa.c b/drivers/gpu/drm/i915/display/intel_display_wa.c
index 581d943b9bdc..081a4092cd13 100644
--- a/drivers/gpu/drm/i915/display/intel_display_wa.c
+++ b/drivers/gpu/drm/i915/display/intel_display_wa.c
@@ -5,11 +5,11 @@
#include <drm/drm_print.h>
-#include "i915_reg.h"
#include "intel_de.h"
#include "intel_display_core.h"
#include "intel_display_regs.h"
#include "intel_display_wa.h"
+#include "intel_step.h"
static void gen11_display_wa_apply(struct intel_display *display)
{
@@ -32,9 +32,17 @@ static void adlp_display_wa_apply(struct intel_display *display)
intel_de_rmw(display, GEN8_CHICKEN_DCPR_1, DDI_CLOCK_REG_ACCESS, 0);
}
+static void xe3plpd_display_wa_apply(struct intel_display *display)
+{
+ /* Wa_22021451799 */
+ intel_de_rmw(display, GEN9_CLKGATE_DIS_0, 0, DMG_GATING_DIS);
+}
+
void intel_display_wa_apply(struct intel_display *display)
{
- if (display->platform.alderlake_p)
+ if (DISPLAY_VER(display) == 35)
+ xe3plpd_display_wa_apply(display);
+ else if (display->platform.alderlake_p)
adlp_display_wa_apply(display);
else if (DISPLAY_VER(display) == 12)
xe_d_display_wa_apply(display);
@@ -62,22 +70,89 @@ static bool intel_display_needs_wa_16025573575(struct intel_display *display)
bool __intel_display_wa(struct intel_display *display, enum intel_display_wa wa, const char *name)
{
switch (wa) {
+ case INTEL_DISPLAY_WA_1409120013:
+ return IS_DISPLAY_VER(display, 11, 12);
+ case INTEL_DISPLAY_WA_1409767108:
+ return (display->platform.alderlake_s ||
+ (display->platform.rocketlake &&
+ IS_DISPLAY_STEP(display, STEP_A0, STEP_B0)));
case INTEL_DISPLAY_WA_13012396614:
- return DISPLAY_VERx100(display) == 3000;
+ return DISPLAY_VERx100(display) == 3000 ||
+ DISPLAY_VERx100(display) == 3500;
+ case INTEL_DISPLAY_WA_14010477008:
+ return display->platform.dg1 || display->platform.rocketlake ||
+ (display->platform.tigerlake &&
+ IS_DISPLAY_STEP(display, STEP_A0, STEP_D0));
+ case INTEL_DISPLAY_WA_14010480278:
+ return (IS_DISPLAY_VER(display, 10, 12));
+ case INTEL_DISPLAY_WA_14010547955:
+ return display->platform.dg2;
+ case INTEL_DISPLAY_WA_14010685332:
+ return INTEL_PCH_TYPE(display) >= PCH_CNP &&
+ INTEL_PCH_TYPE(display) < PCH_DG1;
+ case INTEL_DISPLAY_WA_14011294188:
+ return INTEL_PCH_TYPE(display) >= PCH_TGP &&
+ INTEL_PCH_TYPE(display) < PCH_DG1;
+ case INTEL_DISPLAY_WA_14011503030:
case INTEL_DISPLAY_WA_14011503117:
+ case INTEL_DISPLAY_WA_22012358565:
return DISPLAY_VER(display) == 13;
+ case INTEL_DISPLAY_WA_14011508470:
+ return (IS_DISPLAY_VERx100(display, 1200, 1300));
+ case INTEL_DISPLAY_WA_14011765242:
+ return display->platform.alderlake_s &&
+ IS_DISPLAY_STEP(display, STEP_A0, STEP_A2);
+ case INTEL_DISPLAY_WA_14014143976:
+ return IS_DISPLAY_STEP(display, STEP_E0, STEP_FOREVER);
+ case INTEL_DISPLAY_WA_14016740474:
+ return IS_DISPLAY_VERx100_STEP(display, 1400, STEP_A0, STEP_C0);
+ case INTEL_DISPLAY_WA_14020863754:
+ return DISPLAY_VERx100(display) == 3000 ||
+ DISPLAY_VERx100(display) == 2000 ||
+ DISPLAY_VERx100(display) == 1401;
case INTEL_DISPLAY_WA_14025769978:
return DISPLAY_VER(display) == 35;
+ case INTEL_DISPLAY_WA_15013987218:
+ return DISPLAY_VER(display) == 20;
case INTEL_DISPLAY_WA_15018326506:
return display->platform.battlemage;
+ case INTEL_DISPLAY_WA_16011303918:
+ case INTEL_DISPLAY_WA_22011320316:
+ return display->platform.alderlake_p &&
+ IS_DISPLAY_STEP(display, STEP_A0, STEP_B0);
+ case INTEL_DISPLAY_WA_16011181250:
+ return display->platform.rocketlake || display->platform.alderlake_s ||
+ display->platform.dg2;
+ case INTEL_DISPLAY_WA_16011342517:
+ return display->platform.alderlake_p &&
+ IS_DISPLAY_STEP(display, STEP_A0, STEP_D0);
+ case INTEL_DISPLAY_WA_16011863758:
+ return DISPLAY_VER(display) >= 11;
case INTEL_DISPLAY_WA_16023588340:
return intel_display_needs_wa_16023588340(display);
case INTEL_DISPLAY_WA_16025573575:
return intel_display_needs_wa_16025573575(display);
+ case INTEL_DISPLAY_WA_16025596647:
+ return DISPLAY_VER(display) == 20 &&
+ IS_DISPLAY_VERx100_STEP(display, 3000,
+ STEP_A0, STEP_B0);
+ case INTEL_DISPLAY_WA_18034343758:
+ return DISPLAY_VER(display) == 20 ||
+ (display->platform.pantherlake &&
+ IS_DISPLAY_STEP(display, STEP_A0, STEP_B0));
+ case INTEL_DISPLAY_WA_22010178259:
+ return DISPLAY_VER(display) == 12;
+ case INTEL_DISPLAY_WA_22010947358:
+ return display->platform.alderlake_p;
+ case INTEL_DISPLAY_WA_22012278275:
+ return display->platform.alderlake_p &&
+ IS_DISPLAY_STEP(display, STEP_A0, STEP_E0);
case INTEL_DISPLAY_WA_22014263786:
return IS_DISPLAY_VERx100(display, 1100, 1400);
+ case INTEL_DISPLAY_WA_22021048059:
+ return IS_DISPLAY_VER(display, 14, 35);
default:
- drm_WARN(display->drm, 1, "Missing Wa number: %s\n", name);
+ drm_WARN(display->drm, 1, "Missing Wa: %s\n", name);
break;
}
diff --git a/drivers/gpu/drm/i915/display/intel_display_wa.h b/drivers/gpu/drm/i915/display/intel_display_wa.h
index 40f989f19df1..15fec843f15e 100644
--- a/drivers/gpu/drm/i915/display/intel_display_wa.h
+++ b/drivers/gpu/drm/i915/display/intel_display_wa.h
@@ -27,18 +27,44 @@ bool intel_display_needs_wa_16023588340(struct intel_display *display);
* number.
*/
enum intel_display_wa {
+ INTEL_DISPLAY_WA_1409120013,
+ INTEL_DISPLAY_WA_1409767108,
INTEL_DISPLAY_WA_13012396614,
+ INTEL_DISPLAY_WA_14010477008,
+ INTEL_DISPLAY_WA_14010480278,
+ INTEL_DISPLAY_WA_14010547955,
+ INTEL_DISPLAY_WA_14010685332,
+ INTEL_DISPLAY_WA_14011294188,
+ INTEL_DISPLAY_WA_14011503030,
INTEL_DISPLAY_WA_14011503117,
+ INTEL_DISPLAY_WA_14011508470,
+ INTEL_DISPLAY_WA_14011765242,
+ INTEL_DISPLAY_WA_14014143976,
+ INTEL_DISPLAY_WA_14016740474,
+ INTEL_DISPLAY_WA_14020863754,
INTEL_DISPLAY_WA_14025769978,
+ INTEL_DISPLAY_WA_15013987218,
INTEL_DISPLAY_WA_15018326506,
+ INTEL_DISPLAY_WA_16011181250,
+ INTEL_DISPLAY_WA_16011303918,
+ INTEL_DISPLAY_WA_16011342517,
+ INTEL_DISPLAY_WA_16011863758,
INTEL_DISPLAY_WA_16023588340,
INTEL_DISPLAY_WA_16025573575,
+ INTEL_DISPLAY_WA_16025596647,
+ INTEL_DISPLAY_WA_18034343758,
+ INTEL_DISPLAY_WA_22010178259,
+ INTEL_DISPLAY_WA_22010947358,
+ INTEL_DISPLAY_WA_22011320316,
+ INTEL_DISPLAY_WA_22012278275,
+ INTEL_DISPLAY_WA_22012358565,
INTEL_DISPLAY_WA_22014263786,
+ INTEL_DISPLAY_WA_22021048059,
};
bool __intel_display_wa(struct intel_display *display, enum intel_display_wa wa, const char *name);
#define intel_display_wa(__display, __wa) \
- __intel_display_wa((__display), INTEL_DISPLAY_WA_##__wa, __stringify(__wa))
+ __intel_display_wa((__display), __wa, __stringify(__wa))
#endif
diff --git a/drivers/gpu/drm/i915/display/intel_dmc.c b/drivers/gpu/drm/i915/display/intel_dmc.c
index 1006b060c3f3..90ba932d940a 100644
--- a/drivers/gpu/drm/i915/display/intel_dmc.c
+++ b/drivers/gpu/drm/i915/display/intel_dmc.c
@@ -29,7 +29,6 @@
#include <drm/drm_file.h>
#include <drm/drm_print.h>
-#include "i915_reg.h"
#include "intel_crtc.h"
#include "intel_de.h"
#include "intel_display_power_well.h"
@@ -1599,8 +1598,7 @@ static bool intel_dmc_get_dc6_allowed_count(struct intel_display *display, u32 *
return false;
mutex_lock(&power_domains->lock);
- dc6_enabled = intel_de_read(display, DC_STATE_EN) &
- DC_STATE_EN_UPTO_DC6;
+ dc6_enabled = power_domains->dc_state & DC_STATE_EN_UPTO_DC6;
if (dc6_enabled)
intel_dmc_update_dc6_allowed_count(display, false);
diff --git a/drivers/gpu/drm/i915/display/intel_dp.c b/drivers/gpu/drm/i915/display/intel_dp.c
index 559cf3bb23fd..4955bd8b11d7 100644
--- a/drivers/gpu/drm/i915/display/intel_dp.c
+++ b/drivers/gpu/drm/i915/display/intel_dp.c
@@ -107,20 +107,6 @@
/* Constants for DP DSC configurations */
static const u8 valid_dsc_bpp[] = {6, 8, 10, 12, 15};
-/*
- * With Single pipe configuration, HW is capable of supporting maximum of:
- * 2 slices per line for ICL, BMG
- * 4 slices per line for other platforms.
- * For now consider a max of 2 slices per line, which works for all platforms.
- * With this we can have max of 4 DSC Slices per pipe.
- *
- * For higher resolutions where 12 slice support is required with
- * ultrajoiner, only then each pipe can support 3 slices.
- *
- * #TODO Split this better to use 4 slices/dsc engine where supported.
- */
-static const u8 valid_dsc_slicecount[] = {1, 2, 3, 4};
-
/**
* intel_dp_is_edp - is the given port attached to an eDP panel (either CPU or PCH)
* @intel_dp: DP struct
@@ -508,11 +494,16 @@ bool intel_dp_has_joiner(struct intel_dp *intel_dp)
struct intel_display *display = to_intel_display(intel_dp);
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
struct intel_encoder *encoder = &intel_dig_port->base;
+ struct intel_connector *connector = intel_dp->attached_connector;
/* eDP MSO is not compatible with joiner */
if (intel_dp->mso_link_count)
return false;
+ if (intel_dp_is_edp(intel_dp) &&
+ !connector->panel.vbt.edp.pipe_joiner_enable)
+ return false;
+
return DISPLAY_VER(display) >= 12 ||
(DISPLAY_VER(display) == 11 &&
encoder->port != PORT_A);
@@ -959,19 +950,25 @@ u32 get_max_compressed_bpp_with_joiner(struct intel_display *display,
return max_bpp;
}
-u8 intel_dp_dsc_get_slice_count(const struct intel_connector *connector,
- int mode_clock, int mode_hdisplay,
- int num_joined_pipes)
+static int intel_dp_dsc_min_slice_count(const struct intel_connector *connector,
+ int mode_clock, int mode_hdisplay)
{
struct intel_display *display = to_intel_display(connector);
- u32 sink_slice_count_mask =
- drm_dp_dsc_sink_slice_count_mask(connector->dp.dsc_dpcd, false);
- u8 min_slice_count, i;
+ bool is_edp =
+ connector->base.connector_type == DRM_MODE_CONNECTOR_eDP;
+ int min_slice_count;
int max_slice_width;
int tp_rgb_yuv444;
int tp_yuv422_420;
/*
+ * TODO: allow using less than the maximum number of slices
+ * supported by the eDP sink, to allow using fewer DSC engines.
+ */
+ if (is_edp)
+ return drm_dp_dsc_sink_max_slice_count(connector->dp.dsc_dpcd, true);
+
+ /*
* TODO: Use the throughput value specific to the actual RGB/YUV
* format of the output.
* The RGB/YUV444 throughput value should be always either equal
@@ -1011,7 +1008,7 @@ u8 intel_dp_dsc_get_slice_count(const struct intel_connector *connector,
* slice and VDSC engine, whenever we approach close enough to max CDCLK
*/
if (mode_clock >= ((display->cdclk.max_cdclk_freq * 85) / 100))
- min_slice_count = max_t(u8, min_slice_count, 2);
+ min_slice_count = max(min_slice_count, 2);
max_slice_width = drm_dp_dsc_sink_max_slice_width(connector->dp.dsc_dpcd);
if (max_slice_width < DP_DSC_MIN_SLICE_WIDTH_VALUE) {
@@ -1021,39 +1018,64 @@ u8 intel_dp_dsc_get_slice_count(const struct intel_connector *connector,
return 0;
}
/* Also take into account max slice width */
- min_slice_count = max_t(u8, min_slice_count,
- DIV_ROUND_UP(mode_hdisplay,
- max_slice_width));
+ min_slice_count = max(min_slice_count,
+ DIV_ROUND_UP(mode_hdisplay, max_slice_width));
- /* Find the closest match to the valid slice count values */
- for (i = 0; i < ARRAY_SIZE(valid_dsc_slicecount); i++) {
- u8 test_slice_count = valid_dsc_slicecount[i] * num_joined_pipes;
+ return min_slice_count;
+}
- /*
- * 3 DSC Slices per pipe need 3 DSC engines, which is supported only
- * with Ultrajoiner only for some platforms.
- */
- if (valid_dsc_slicecount[i] == 3 &&
- (!HAS_DSC_3ENGINES(display) || num_joined_pipes != 4))
+static bool
+intel_dp_dsc_get_slice_config(const struct intel_connector *connector,
+ int mode_clock, int mode_hdisplay,
+ int num_joined_pipes,
+ struct intel_dsc_slice_config *config_ret)
+{
+ struct intel_display *display = to_intel_display(connector);
+ int min_slice_count =
+ intel_dp_dsc_min_slice_count(connector, mode_clock, mode_hdisplay);
+ bool is_edp =
+ connector->base.connector_type == DRM_MODE_CONNECTOR_eDP;
+ u32 sink_slice_count_mask =
+ drm_dp_dsc_sink_slice_count_mask(connector->dp.dsc_dpcd, is_edp);
+ int slices_per_pipe;
+
+ /*
+ * Find the closest match to the valid slice count values
+ *
+ * Max HW DSC-per-pipe x slice-per-DSC (= slice-per-pipe) capability:
+ * ICL: 2x2
+ * BMG: 2x2, or for ultrajoined 4 pipes: 3x1
+ * TGL+: 2x4 (TODO: Add support for this)
+ *
+ * TODO: Explore if it's worth increasing the number of slices (from 1
+ * to 2 or 3), so that multiple VDSC engines can be used, thus
+ * reducing the minimum CDCLK requirement, which in turn is determined
+ * by the 1 pixel per clock VDSC engine throughput in
+ * intel_vdsc_min_cdclk().
+ */
+ for (slices_per_pipe = 1; slices_per_pipe <= 4; slices_per_pipe++) {
+ struct intel_dsc_slice_config config;
+ int slices_per_line;
+
+ if (!intel_dsc_get_slice_config(display,
+ num_joined_pipes, slices_per_pipe,
+ &config))
continue;
- if (!(drm_dp_dsc_slice_count_to_mask(test_slice_count) &
+ slices_per_line = intel_dsc_line_slice_count(&config);
+
+ if (!(drm_dp_dsc_slice_count_to_mask(slices_per_line) &
sink_slice_count_mask))
continue;
- /*
- * Bigjoiner needs small joiner to be enabled.
- * So there should be at least 2 dsc slices per pipe,
- * whenever bigjoiner is enabled.
- */
- if (num_joined_pipes > 1 && valid_dsc_slicecount[i] < 2)
+ if (mode_hdisplay % slices_per_line)
continue;
- if (mode_hdisplay % test_slice_count)
- continue;
+ if (min_slice_count <= slices_per_line) {
+ *config_ret = config;
- if (min_slice_count <= test_slice_count)
- return test_slice_count;
+ return true;
+ }
}
/* Print slice count 1,2,4,..24 if bit#0,1,3,..23 is set in the mask. */
@@ -1064,7 +1086,21 @@ u8 intel_dp_dsc_get_slice_count(const struct intel_connector *connector,
min_slice_count,
(int)BITS_PER_TYPE(sink_slice_count_mask), &sink_slice_count_mask);
- return 0;
+ return false;
+}
+
+u8 intel_dp_dsc_get_slice_count(const struct intel_connector *connector,
+ int mode_clock, int mode_hdisplay,
+ int num_joined_pipes)
+{
+ struct intel_dsc_slice_config config;
+
+ if (!intel_dp_dsc_get_slice_config(connector,
+ mode_clock, mode_hdisplay,
+ num_joined_pipes, &config))
+ return 0;
+
+ return intel_dsc_line_slice_count(&config);
}
static bool source_can_output(struct intel_dp *intel_dp,
@@ -1335,44 +1371,9 @@ intel_dp_mode_valid_downstream(struct intel_connector *connector,
return MODE_OK;
}
-static
-bool intel_dp_needs_joiner(struct intel_dp *intel_dp,
- struct intel_connector *connector,
- int hdisplay, int clock,
- int num_joined_pipes)
-{
- struct intel_display *display = to_intel_display(intel_dp);
- int hdisplay_limit;
-
- if (!intel_dp_has_joiner(intel_dp))
- return false;
-
- num_joined_pipes /= 2;
-
- hdisplay_limit = DISPLAY_VER(display) >= 30 ? 6144 : 5120;
-
- return clock > num_joined_pipes * display->cdclk.max_dotclk_freq ||
- hdisplay > num_joined_pipes * hdisplay_limit;
-}
-
-int intel_dp_num_joined_pipes(struct intel_dp *intel_dp,
- struct intel_connector *connector,
- int hdisplay, int clock)
+int intel_dp_max_hdisplay_per_pipe(struct intel_display *display)
{
- struct intel_display *display = to_intel_display(intel_dp);
-
- if (connector->force_joined_pipes)
- return connector->force_joined_pipes;
-
- if (HAS_ULTRAJOINER(display) &&
- intel_dp_needs_joiner(intel_dp, connector, hdisplay, clock, 4))
- return 4;
-
- if ((HAS_BIGJOINER(display) || HAS_UNCOMPRESSED_JOINER(display)) &&
- intel_dp_needs_joiner(intel_dp, connector, hdisplay, clock, 2))
- return 2;
-
- return 1;
+ return DISPLAY_VER(display) >= 30 ? 6144 : 5120;
}
bool intel_dp_has_dsc(const struct intel_connector *connector)
@@ -1395,6 +1396,51 @@ bool intel_dp_has_dsc(const struct intel_connector *connector)
return true;
}
+static
+bool intel_dp_can_join(struct intel_dp *intel_dp,
+ int num_joined_pipes)
+{
+ struct intel_display *display = to_intel_display(intel_dp);
+
+ if (num_joined_pipes > 1 && !intel_dp_has_joiner(intel_dp))
+ return false;
+
+ switch (num_joined_pipes) {
+ case 1:
+ return true;
+ case 2:
+ return HAS_BIGJOINER(display) ||
+ HAS_UNCOMPRESSED_JOINER(display);
+ case 4:
+ return HAS_ULTRAJOINER(display);
+ default:
+ return false;
+ }
+}
+
+bool intel_dp_dotclk_valid(struct intel_display *display,
+ int target_clock,
+ int htotal,
+ int dsc_slice_count,
+ int num_joined_pipes)
+{
+ int max_dotclk = display->cdclk.max_dotclk_freq;
+ int effective_dotclk_limit;
+
+ effective_dotclk_limit = max_dotclk * num_joined_pipes;
+
+ if (dsc_slice_count)
+ target_clock = intel_dsc_get_pixel_rate_with_dsc_bubbles(display,
+ target_clock,
+ htotal,
+ dsc_slice_count);
+ else
+ effective_dotclk_limit =
+ intel_max_uncompressed_dotclock(display) * num_joined_pipes;
+
+ return target_clock <= effective_dotclk_limit;
+}
+
static enum drm_mode_status
intel_dp_mode_valid(struct drm_connector *_connector,
const struct drm_display_mode *mode)
@@ -1406,9 +1452,7 @@ intel_dp_mode_valid(struct drm_connector *_connector,
const struct drm_display_mode *fixed_mode;
int target_clock = mode->clock;
int max_rate, mode_rate, max_lanes, max_link_clock;
- int max_dotclk = display->cdclk.max_dotclk_freq;
u16 dsc_max_compressed_bpp = 0;
- u8 dsc_slice_count = 0;
enum drm_mode_status status;
bool dsc = false;
int num_joined_pipes;
@@ -1424,6 +1468,9 @@ intel_dp_mode_valid(struct drm_connector *_connector,
if (mode->clock < 10000)
return MODE_CLOCK_LOW;
+ if (intel_dp_hdisplay_bad(display, mode->hdisplay))
+ return MODE_H_ILLEGAL;
+
fixed_mode = intel_panel_fixed_mode(connector, mode);
if (intel_dp_is_edp(intel_dp) && fixed_mode) {
status = intel_panel_mode_valid(connector, mode);
@@ -1433,23 +1480,9 @@ intel_dp_mode_valid(struct drm_connector *_connector,
target_clock = fixed_mode->clock;
}
- num_joined_pipes = intel_dp_num_joined_pipes(intel_dp, connector,
- mode->hdisplay, target_clock);
- max_dotclk *= num_joined_pipes;
-
sink_format = intel_dp_sink_format(connector, mode);
output_format = intel_dp_output_format(connector, sink_format);
- status = intel_pfit_mode_valid(display, mode, output_format, num_joined_pipes);
- if (status != MODE_OK)
- return status;
-
- if (target_clock > max_dotclk)
- return MODE_CLOCK_HIGH;
-
- if (intel_dp_hdisplay_bad(display, mode->hdisplay))
- return MODE_H_ILLEGAL;
-
max_link_clock = intel_dp_max_link_rate(intel_dp);
max_lanes = intel_dp_max_lane_count(intel_dp);
@@ -1460,52 +1493,92 @@ intel_dp_mode_valid(struct drm_connector *_connector,
target_clock, mode->hdisplay,
link_bpp_x16, 0);
- if (intel_dp_has_dsc(connector)) {
- int pipe_bpp;
+ /*
+ * We cannot determine the required pipe-join count before knowing whether
+ * DSC is needed, nor can we determine DSC need without knowing the pipe
+ * count.
+ * Because of this dependency cycle, the only correct approach is to iterate
+ * over candidate pipe counts and evaluate each combination.
+ */
+ status = MODE_CLOCK_HIGH;
+ for_each_joiner_candidate(connector, mode, num_joined_pipes) {
+ int dsc_slice_count = 0;
- /*
- * TBD pass the connector BPC,
- * for now U8_MAX so that max BPC on that platform would be picked
- */
- pipe_bpp = intel_dp_dsc_compute_max_bpp(connector, U8_MAX);
+ status = intel_pfit_mode_valid(display, mode, output_format, num_joined_pipes);
+ if (status != MODE_OK)
+ continue;
- /*
- * Output bpp is stored in 6.4 format so right shift by 4 to get the
- * integer value since we support only integer values of bpp.
- */
- if (intel_dp_is_edp(intel_dp)) {
- dsc_max_compressed_bpp =
- drm_edp_dsc_sink_output_bpp(connector->dp.dsc_dpcd) >> 4;
- dsc_slice_count =
- drm_dp_dsc_sink_max_slice_count(connector->dp.dsc_dpcd,
- true);
- dsc = dsc_max_compressed_bpp && dsc_slice_count;
- } else if (drm_dp_sink_supports_fec(connector->dp.fec_capability)) {
- unsigned long bw_overhead_flags = 0;
-
- if (!drm_dp_is_uhbr_rate(max_link_clock))
- bw_overhead_flags |= DRM_DP_BW_OVERHEAD_FEC;
-
- dsc = intel_dp_mode_valid_with_dsc(connector,
- max_link_clock, max_lanes,
- target_clock, mode->hdisplay,
- num_joined_pipes,
- output_format, pipe_bpp,
- bw_overhead_flags);
+ if (intel_dp_has_dsc(connector)) {
+ int pipe_bpp;
+
+ dsc_slice_count = intel_dp_dsc_get_slice_count(connector,
+ target_clock,
+ mode->hdisplay,
+ num_joined_pipes);
+
+ /*
+ * TBD pass the connector BPC,
+ * for now U8_MAX so that max BPC on that platform would be picked
+ */
+ pipe_bpp = intel_dp_dsc_compute_max_bpp(connector, U8_MAX);
+
+ /*
+ * Output bpp is stored in 6.4 format so right shift by 4 to get the
+ * integer value since we support only integer values of bpp.
+ */
+ if (intel_dp_is_edp(intel_dp)) {
+ dsc_max_compressed_bpp =
+ drm_edp_dsc_sink_output_bpp(connector->dp.dsc_dpcd) >> 4;
+
+ dsc = dsc_max_compressed_bpp && dsc_slice_count;
+ } else if (drm_dp_sink_supports_fec(connector->dp.fec_capability)) {
+ unsigned long bw_overhead_flags = 0;
+
+ if (!drm_dp_is_uhbr_rate(max_link_clock))
+ bw_overhead_flags |= DRM_DP_BW_OVERHEAD_FEC;
+
+ dsc = intel_dp_mode_valid_with_dsc(connector,
+ max_link_clock, max_lanes,
+ target_clock, mode->hdisplay,
+ num_joined_pipes,
+ output_format, pipe_bpp,
+ bw_overhead_flags);
+ }
}
- }
- if (intel_dp_joiner_needs_dsc(display, num_joined_pipes) && !dsc)
- return MODE_CLOCK_HIGH;
+ if (intel_dp_joiner_needs_dsc(display, num_joined_pipes) && !dsc) {
+ status = MODE_CLOCK_HIGH;
+ continue;
+ }
- if (mode_rate > max_rate && !dsc)
- return MODE_CLOCK_HIGH;
+ if (mode_rate > max_rate && !dsc) {
+ status = MODE_CLOCK_HIGH;
+ continue;
+ }
+
+ status = intel_mode_valid_max_plane_size(display, mode, num_joined_pipes);
+ if (status != MODE_OK)
+ continue;
+
+ if (!dsc)
+ dsc_slice_count = 0;
+
+ if (!intel_dp_dotclk_valid(display,
+ target_clock,
+ mode->htotal,
+ dsc_slice_count,
+ num_joined_pipes)) {
+ status = MODE_CLOCK_HIGH;
+ continue;
+ }
+
+ break;
+ }
- status = intel_dp_mode_valid_downstream(connector, mode, target_clock);
if (status != MODE_OK)
return status;
- return intel_mode_valid_max_plane_size(display, mode, num_joined_pipes);
+ return intel_dp_mode_valid_downstream(connector, mode, target_clock);
}
bool intel_dp_source_supports_tps3(struct intel_display *display)
@@ -1696,7 +1769,7 @@ static int intel_dp_max_bpp(struct intel_dp *intel_dp,
struct intel_connector *connector = intel_dp->attached_connector;
int bpp, bpc;
- bpc = crtc_state->pipe_bpp / 3;
+ bpc = crtc_state->max_pipe_bpp / 3;
if (intel_dp->dfp.max_bpc)
bpc = min_t(int, bpc, intel_dp->dfp.max_bpc);
@@ -2032,12 +2105,14 @@ static int dsc_compute_link_config(struct intel_dp *intel_dp,
} else {
unsigned long bw_overhead_flags =
pipe_config->fec_enable ? DRM_DP_BW_OVERHEAD_FEC : 0;
+ int line_slice_count =
+ intel_dsc_line_slice_count(&pipe_config->dsc.slice_config);
if (!is_bw_sufficient_for_dsc_config(intel_dp,
link_rate, lane_count,
adjusted_mode->crtc_clock,
adjusted_mode->hdisplay,
- pipe_config->dsc.slice_count,
+ line_slice_count,
dsc_bpp_x16,
bw_overhead_flags))
continue;
@@ -2344,6 +2419,17 @@ bool intel_dp_needs_8b10b_fec(const struct intel_crtc_state *crtc_state,
return dsc_enabled_on_crtc || intel_dsc_enabled_on_link(crtc_state);
}
+void intel_dp_dsc_reset_config(struct intel_crtc_state *crtc_state)
+{
+ crtc_state->fec_enable = false;
+
+ crtc_state->dsc.compression_enable = false;
+ crtc_state->dsc.compressed_bpp_x16 = 0;
+
+ memset(&crtc_state->dsc.slice_config, 0, sizeof(crtc_state->dsc.slice_config));
+ memset(&crtc_state->dsc.config, 0, sizeof(crtc_state->dsc.config));
+}
+
int intel_dp_dsc_compute_config(struct intel_dp *intel_dp,
struct intel_crtc_state *pipe_config,
struct drm_connector_state *conn_state,
@@ -2382,47 +2468,10 @@ int intel_dp_dsc_compute_config(struct intel_dp *intel_dp,
}
}
- /* Calculate Slice count */
- if (intel_dp_is_edp(intel_dp)) {
- pipe_config->dsc.slice_count =
- drm_dp_dsc_sink_max_slice_count(connector->dp.dsc_dpcd,
- true);
- if (!pipe_config->dsc.slice_count) {
- drm_dbg_kms(display->drm,
- "Unsupported Slice Count %d\n",
- pipe_config->dsc.slice_count);
- return -EINVAL;
- }
- } else {
- u8 dsc_dp_slice_count;
-
- dsc_dp_slice_count =
- intel_dp_dsc_get_slice_count(connector,
- adjusted_mode->crtc_clock,
- adjusted_mode->crtc_hdisplay,
- num_joined_pipes);
- if (!dsc_dp_slice_count) {
- drm_dbg_kms(display->drm,
- "Compressed Slice Count not supported\n");
- return -EINVAL;
- }
-
- pipe_config->dsc.slice_count = dsc_dp_slice_count;
- }
- /*
- * VDSC engine operates at 1 Pixel per clock, so if peak pixel rate
- * is greater than the maximum Cdclock and if slice count is even
- * then we need to use 2 VDSC instances.
- * In case of Ultrajoiner along with 12 slices we need to use 3
- * VDSC instances.
- */
- if (pipe_config->joiner_pipes && num_joined_pipes == 4 &&
- pipe_config->dsc.slice_count == 12)
- pipe_config->dsc.num_streams = 3;
- else if (pipe_config->joiner_pipes || pipe_config->dsc.slice_count > 1)
- pipe_config->dsc.num_streams = 2;
- else
- pipe_config->dsc.num_streams = 1;
+ if (!intel_dp_dsc_get_slice_config(connector, adjusted_mode->crtc_clock,
+ adjusted_mode->crtc_hdisplay, num_joined_pipes,
+ &pipe_config->dsc.slice_config))
+ return -EINVAL;
ret = intel_dp_dsc_compute_params(connector, pipe_config);
if (ret < 0) {
@@ -2440,7 +2489,7 @@ int intel_dp_dsc_compute_config(struct intel_dp *intel_dp,
"Compressed Bpp = " FXP_Q4_FMT " Slice Count = %d\n",
pipe_config->pipe_bpp,
FXP_Q4_ARGS(pipe_config->dsc.compressed_bpp_x16),
- pipe_config->dsc.slice_count);
+ intel_dsc_line_slice_count(&pipe_config->dsc.slice_config));
return 0;
}
@@ -2476,8 +2525,8 @@ dsc_throughput_quirk_max_bpp_x16(const struct intel_connector *connector,
return fxp_q4_from_int(12);
}
-static int compute_min_compressed_bpp_x16(struct intel_connector *connector,
- enum intel_output_format output_format)
+int intel_dp_compute_min_compressed_bpp_x16(struct intel_connector *connector,
+ enum intel_output_format output_format)
{
int dsc_src_min_bpp, dsc_sink_min_bpp, dsc_min_bpp;
int min_bpp_x16;
@@ -2543,7 +2592,8 @@ bool intel_dp_mode_valid_with_dsc(struct intel_connector *connector,
int pipe_bpp, unsigned long bw_overhead_flags)
{
struct intel_dp *intel_dp = intel_attached_dp(connector);
- int min_bpp_x16 = compute_min_compressed_bpp_x16(connector, output_format);
+ int min_bpp_x16 = intel_dp_compute_min_compressed_bpp_x16(connector,
+ output_format);
int max_bpp_x16 = compute_max_compressed_bpp_x16(connector,
mode_clock, mode_hdisplay,
num_joined_pipes,
@@ -2597,7 +2647,8 @@ intel_dp_compute_config_link_bpp_limits(struct intel_connector *connector,
limits->link.min_bpp_x16 = fxp_q4_from_int(limits->pipe.min_bpp);
} else {
limits->link.min_bpp_x16 =
- compute_min_compressed_bpp_x16(connector, crtc_state->output_format);
+ intel_dp_compute_min_compressed_bpp_x16(connector,
+ crtc_state->output_format);
max_link_bpp_x16 =
compute_max_compressed_bpp_x16(connector,
@@ -2691,7 +2742,7 @@ intel_dp_compute_config_limits(struct intel_dp *intel_dp,
* previously. This hack should be removed once we have the
* proper retry logic in place.
*/
- limits->pipe.max_bpp = min(crtc_state->pipe_bpp, 24);
+ limits->pipe.max_bpp = min(crtc_state->max_pipe_bpp, 24);
} else {
limits->pipe.max_bpp = intel_dp_max_bpp(intel_dp, crtc_state,
respect_downstream_limits);
@@ -2710,9 +2761,39 @@ intel_dp_compute_config_limits(struct intel_dp *intel_dp,
crtc_state)));
}
+ if (limits->pipe.min_bpp <= 0 ||
+ limits->pipe.min_bpp > limits->pipe.max_bpp) {
+ drm_dbg_kms(display->drm, "[CONNECTOR:%d:%s] Invalid pipe bpp range: %d-%d\n",
+ connector->base.base.id, connector->base.name,
+ limits->pipe.min_bpp, limits->pipe.max_bpp);
+
+ return false;
+ }
+
if (dsc && !intel_dp_dsc_compute_pipe_bpp_limits(connector, limits))
return false;
+ /*
+ * crtc_state->pipe_bpp is the non-DP specific baseline (platform /
+ * EDID) maximum pipe BPP limited by the max-BPC connector property
+ * request. Since by now pipe.max_bpp is <= the above baseline
+ * maximum BPP, the only remaining reason for adjusting pipe.max_bpp
+ * is the max-BPC connector property request. Adjust pipe.max_bpp to
+ * this request within the current valid pipe.min_bpp .. pipe.max_bpp
+ * range.
+ */
+ limits->pipe.max_bpp = clamp(crtc_state->pipe_bpp, limits->pipe.min_bpp,
+ limits->pipe.max_bpp);
+ if (dsc)
+ limits->pipe.max_bpp = align_max_sink_dsc_input_bpp(connector,
+ limits->pipe.max_bpp);
+
+ if (limits->pipe.max_bpp != crtc_state->pipe_bpp)
+ drm_dbg_kms(display->drm,
+ "[CONNECTOR:%d:%s] Adjusting requested max pipe bpp %d -> %d\n",
+ connector->base.base.id, connector->base.name,
+ crtc_state->pipe_bpp, limits->pipe.max_bpp);
+
if (is_mst || intel_dp->use_max_params) {
/*
* For MST we always configure max link bw - the spec doesn't
@@ -2764,13 +2845,13 @@ bool intel_dp_joiner_needs_dsc(struct intel_display *display,
}
static int
-intel_dp_compute_link_config(struct intel_encoder *encoder,
- struct intel_crtc_state *pipe_config,
- struct drm_connector_state *conn_state,
- bool respect_downstream_limits)
+intel_dp_compute_link_for_joined_pipes(struct intel_encoder *encoder,
+ struct intel_crtc_state *pipe_config,
+ struct drm_connector_state *conn_state,
+ bool respect_downstream_limits)
{
struct intel_display *display = to_intel_display(encoder);
- struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
+ int num_joined_pipes = intel_crtc_num_joined_pipes(pipe_config);
struct intel_connector *connector =
to_intel_connector(conn_state->connector);
const struct drm_display_mode *adjusted_mode =
@@ -2778,18 +2859,9 @@ intel_dp_compute_link_config(struct intel_encoder *encoder,
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
struct link_config_limits limits;
bool dsc_needed, joiner_needs_dsc;
- int num_joined_pipes;
int ret = 0;
- if (pipe_config->fec_enable &&
- !intel_dp_supports_fec(intel_dp, connector, pipe_config))
- return -EINVAL;
-
- num_joined_pipes = intel_dp_num_joined_pipes(intel_dp, connector,
- adjusted_mode->crtc_hdisplay,
- adjusted_mode->crtc_clock);
- if (num_joined_pipes > 1)
- pipe_config->joiner_pipes = GENMASK(crtc->pipe + num_joined_pipes - 1, crtc->pipe);
+ intel_dp_dsc_reset_config(pipe_config);
joiner_needs_dsc = intel_dp_joiner_needs_dsc(display, num_joined_pipes);
@@ -2813,7 +2885,13 @@ intel_dp_compute_link_config(struct intel_encoder *encoder,
fxp_q4_from_int(pipe_config->pipe_bpp),
fxp_q4_from_int(pipe_config->pipe_bpp),
0, false);
- if (ret)
+
+ if (ret ||
+ !intel_dp_dotclk_valid(display,
+ adjusted_mode->crtc_clock,
+ adjusted_mode->crtc_htotal,
+ 0,
+ num_joined_pipes))
dsc_needed = true;
}
@@ -2823,6 +2901,8 @@ intel_dp_compute_link_config(struct intel_encoder *encoder,
}
if (dsc_needed) {
+ int dsc_slice_count;
+
drm_dbg_kms(display->drm,
"Try DSC (fallback=%s, joiner=%s, force=%s)\n",
str_yes_no(ret), str_yes_no(joiner_needs_dsc),
@@ -2838,6 +2918,15 @@ intel_dp_compute_link_config(struct intel_encoder *encoder,
conn_state, &limits, 64);
if (ret < 0)
return ret;
+
+ dsc_slice_count = intel_dsc_line_slice_count(&pipe_config->dsc.slice_config);
+
+ if (!intel_dp_dotclk_valid(display,
+ adjusted_mode->crtc_clock,
+ adjusted_mode->crtc_htotal,
+ dsc_slice_count,
+ num_joined_pipes))
+ return -EINVAL;
}
drm_dbg_kms(display->drm,
@@ -2854,6 +2943,55 @@ intel_dp_compute_link_config(struct intel_encoder *encoder,
return 0;
}
+static int
+intel_dp_compute_link_config(struct intel_encoder *encoder,
+ struct intel_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state,
+ bool respect_downstream_limits)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ struct intel_connector *connector =
+ to_intel_connector(conn_state->connector);
+ const struct drm_display_mode *adjusted_mode =
+ &crtc_state->hw.adjusted_mode;
+ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+ int num_joined_pipes;
+ int ret = -EINVAL;
+
+ if (crtc_state->fec_enable &&
+ !intel_dp_supports_fec(intel_dp, connector, crtc_state))
+ return -EINVAL;
+
+ for_each_joiner_candidate(connector, adjusted_mode, num_joined_pipes) {
+ /*
+ * NOTE:
+ * The crtc_state->joiner_pipes should have been set at the end
+ * only if all the conditions are met. However that would mean
+ * that num_joined_pipes is passed around to all helpers and
+ * make them use it instead of using crtc_state->joiner_pipes
+ * directly or indirectly (via intel_crtc_num_joined_pipes()).
+ *
+ * For now, setting crtc_state->joiner_pipes to the candidate
+ * value to avoid the above churn and resetting it to 0, in case
+ * no joiner candidate is found to be suitable for the given
+ * configuration.
+ */
+ if (num_joined_pipes > 1)
+ crtc_state->joiner_pipes = GENMASK(crtc->pipe + num_joined_pipes - 1,
+ crtc->pipe);
+
+ ret = intel_dp_compute_link_for_joined_pipes(encoder, crtc_state, conn_state,
+ respect_downstream_limits);
+ if (ret == 0 || ret == -EDEADLK)
+ break;
+ }
+
+ if (ret < 0)
+ crtc_state->joiner_pipes = 0;
+
+ return ret;
+}
+
bool intel_dp_limited_color_range(const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state)
{
@@ -4293,20 +4431,24 @@ static bool intel_dp_get_colorimetry_status(struct intel_dp *intel_dp)
return dprx & DP_VSC_SDP_EXT_FOR_COLORIMETRY_SUPPORTED;
}
-static void intel_dp_read_dsc_dpcd(struct drm_dp_aux *aux,
- u8 dsc_dpcd[DP_DSC_RECEIVER_CAP_SIZE])
+static int intel_dp_read_dsc_dpcd(struct drm_dp_aux *aux,
+ u8 dsc_dpcd[DP_DSC_RECEIVER_CAP_SIZE])
{
- if (drm_dp_dpcd_read(aux, DP_DSC_SUPPORT, dsc_dpcd,
- DP_DSC_RECEIVER_CAP_SIZE) < 0) {
- drm_err(aux->drm_dev,
- "Failed to read DPCD register 0x%x\n",
- DP_DSC_SUPPORT);
- return;
+ int ret;
+
+ ret = drm_dp_dpcd_read_data(aux, DP_DSC_SUPPORT, dsc_dpcd,
+ DP_DSC_RECEIVER_CAP_SIZE);
+ if (ret) {
+ drm_dbg_kms(aux->drm_dev,
+ "Could not read DSC DPCD register 0x%x Error: %pe\n",
+ DP_DSC_SUPPORT, ERR_PTR(ret));
+ return ret;
}
drm_dbg_kms(aux->drm_dev, "DSC DPCD: %*ph\n",
DP_DSC_RECEIVER_CAP_SIZE,
dsc_dpcd);
+ return 0;
}
static void init_dsc_overall_throughput_limits(struct intel_connector *connector, bool is_branch)
@@ -4357,8 +4499,9 @@ void intel_dp_get_dsc_sink_cap(u8 dpcd_rev,
if (dpcd_rev < DP_DPCD_REV_14)
return;
- intel_dp_read_dsc_dpcd(connector->dp.dsc_decompression_aux,
- connector->dp.dsc_dpcd);
+ if (intel_dp_read_dsc_dpcd(connector->dp.dsc_decompression_aux,
+ connector->dp.dsc_dpcd) < 0)
+ return;
if (drm_dp_dpcd_readb(connector->dp.dsc_decompression_aux, DP_FEC_CAPABILITY,
&connector->dp.fec_capability) < 0) {
@@ -4388,7 +4531,9 @@ static void intel_edp_get_dsc_sink_cap(u8 edp_dpcd_rev, struct intel_connector *
if (edp_dpcd_rev < DP_EDP_14)
return;
- intel_dp_read_dsc_dpcd(connector->dp.dsc_decompression_aux, connector->dp.dsc_dpcd);
+ if (intel_dp_read_dsc_dpcd(connector->dp.dsc_decompression_aux,
+ connector->dp.dsc_dpcd) < 0)
+ return;
if (connector->dp.dsc_dpcd[0] & DP_DSC_DECOMPRESSION_IS_SUPPORTED)
init_dsc_overall_throughput_limits(connector, false);
@@ -4577,6 +4722,7 @@ static bool
intel_edp_init_dpcd(struct intel_dp *intel_dp, struct intel_connector *connector)
{
struct intel_display *display = to_intel_display(intel_dp);
+ int ret;
/* this function is meant to be called only once */
drm_WARN_ON(display->drm, intel_dp->dpcd[DP_DPCD_REV] != 0);
@@ -4616,6 +4762,12 @@ intel_edp_init_dpcd(struct intel_dp *intel_dp, struct intel_connector *connector
*/
intel_dp_init_source_oui(intel_dp);
+ /* Read the ALPM DPCD caps */
+ ret = drm_dp_dpcd_read_byte(&intel_dp->aux, DP_RECEIVER_ALPM_CAP,
+ &intel_dp->alpm_dpcd);
+ if (ret < 0)
+ return false;
+
/*
* This has to be called after intel_dp->edp_dpcd is filled, PSR checks
* for SET_POWER_CAPABLE bit in intel_dp->edp_dpcd[1]
@@ -4784,6 +4936,24 @@ intel_dp_mst_disconnect(struct intel_dp *intel_dp)
drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst.mgr, intel_dp->is_mst);
}
+#define INTEL_DP_DEVICE_SERVICE_IRQ_MASK_SST (DP_AUTOMATED_TEST_REQUEST | \
+ DP_CP_IRQ | \
+ DP_SINK_SPECIFIC_IRQ)
+
+#define INTEL_DP_DEVICE_SERVICE_IRQ_MASK_MST (DP_CP_IRQ | \
+ DP_DOWN_REP_MSG_RDY | \
+ DP_UP_REQ_MSG_RDY)
+
+#define INTEL_DP_LINK_SERVICE_IRQ_MASK_SST (RX_CAP_CHANGED | \
+ LINK_STATUS_CHANGED | \
+ HDMI_LINK_STATUS_CHANGED | \
+ CONNECTED_OFF_ENTRY_REQUESTED | \
+ DP_TUNNELING_IRQ)
+
+#define INTEL_DP_LINK_SERVICE_IRQ_MASK_MST (RX_CAP_CHANGED | \
+ LINK_STATUS_CHANGED | \
+ DP_TUNNELING_IRQ)
+
static bool
intel_dp_get_sink_irq_esi(struct intel_dp *intel_dp, u8 *esi)
{
@@ -4820,6 +4990,79 @@ static bool intel_dp_ack_sink_irq_esi(struct intel_dp *intel_dp, u8 esi[4])
return false;
}
+/* Return %true if reading the ESI vector succeeded, %false otherwise. */
+static bool intel_dp_get_sink_irq_esi_sst(struct intel_dp *intel_dp, u8 esi[4])
+{
+ memset(esi, 0, 4);
+
+ /*
+ * TODO: For DP_DPCD_REV >= 0x12 read
+ * DP_SINK_COUNT_ESI and DP_DEVICE_SERVICE_IRQ_VECTOR_ESI0.
+ */
+ if (drm_dp_dpcd_read_data(&intel_dp->aux, DP_SINK_COUNT, esi, 2) != 0)
+ return false;
+
+ if (intel_dp->dpcd[DP_DPCD_REV] < DP_DPCD_REV_12)
+ return true;
+
+ /* TODO: Read DP_DEVICE_SERVICE_IRQ_VECTOR_ESI1 as well */
+ if (drm_dp_dpcd_read_byte(&intel_dp->aux, DP_LINK_SERVICE_IRQ_VECTOR_ESI0, &esi[3]) != 0)
+ return false;
+
+ return true;
+}
+
+/* Return %true if acking the ESI vector IRQ events succeeded, %false otherwise. */
+static bool intel_dp_ack_sink_irq_esi_sst(struct intel_dp *intel_dp, u8 esi[4])
+{
+ /*
+ * TODO: For DP_DPCD_REV >= 0x12 write
+ * DP_DEVICE_SERVICE_IRQ_VECTOR_ESI0
+ */
+ if (drm_dp_dpcd_write_byte(&intel_dp->aux, DP_DEVICE_SERVICE_IRQ_VECTOR, esi[1]) != 0)
+ return false;
+
+ if (intel_dp->dpcd[DP_DPCD_REV] < DP_DPCD_REV_12)
+ return true;
+
+ /* TODO: Read DP_DEVICE_SERVICE_IRQ_VECTOR_ESI1 as well */
+ if (drm_dp_dpcd_write_byte(&intel_dp->aux, DP_LINK_SERVICE_IRQ_VECTOR_ESI0, esi[3]) != 0)
+ return false;
+
+ return true;
+}
+
+/*
+ * Return %true if reading the ESI vector and acking the ESI IRQ events succeeded,
+ * %false otherwise.
+ */
+static bool intel_dp_get_and_ack_sink_irq_esi_sst(struct intel_dp *intel_dp, u8 esi[4])
+{
+ struct intel_display *display = to_intel_display(intel_dp);
+ struct intel_connector *connector = intel_dp->attached_connector;
+ struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
+
+ if (!intel_dp_get_sink_irq_esi_sst(intel_dp, esi))
+ return false;
+
+ drm_dbg_kms(display->drm,
+ "[CONNECTOR:%d:%s][ENCODER:%d:%s] DPRX ESI: %4ph\n",
+ connector->base.base.id, connector->base.name,
+ encoder->base.base.id, encoder->base.name,
+ esi);
+
+ esi[1] &= INTEL_DP_DEVICE_SERVICE_IRQ_MASK_SST;
+ esi[3] &= INTEL_DP_LINK_SERVICE_IRQ_MASK_SST;
+
+ if (mem_is_zero(&esi[1], 3))
+ return true;
+
+ if (!intel_dp_ack_sink_irq_esi_sst(intel_dp, esi))
+ return false;
+
+ return true;
+}
+
bool
intel_dp_needs_vsc_sdp(const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state)
@@ -5312,23 +5555,7 @@ intel_dp_mst_hpd_irq(struct intel_dp *intel_dp, u8 *esi, u8 *ack)
}
}
-static bool intel_dp_mst_link_status(struct intel_dp *intel_dp)
-{
- struct intel_display *display = to_intel_display(intel_dp);
- struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
- u8 link_status[DP_LINK_STATUS_SIZE] = {};
- const size_t esi_link_status_size = DP_LINK_STATUS_SIZE - 2;
-
- if (drm_dp_dpcd_read(&intel_dp->aux, DP_LANE0_1_STATUS_ESI, link_status,
- esi_link_status_size) != esi_link_status_size) {
- drm_err(display->drm,
- "[ENCODER:%d:%s] Failed to read link status\n",
- encoder->base.base.id, encoder->base.name);
- return false;
- }
-
- return intel_dp_link_ok(intel_dp, link_status);
-}
+static bool intel_dp_handle_link_service_irq(struct intel_dp *intel_dp, u8 irq_mask);
/**
* intel_dp_check_mst_status - service any pending MST interrupts, check link status
@@ -5348,53 +5575,51 @@ static bool
intel_dp_check_mst_status(struct intel_dp *intel_dp)
{
struct intel_display *display = to_intel_display(intel_dp);
- struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
- struct intel_encoder *encoder = &dig_port->base;
- bool link_ok = true;
+ bool force_retrain = intel_dp->link.force_retrain;
bool reprobe_needed = false;
for (;;) {
u8 esi[4] = {};
u8 ack[4] = {};
+ bool new_irqs;
if (!intel_dp_get_sink_irq_esi(intel_dp, esi)) {
drm_dbg_kms(display->drm,
"failed to get ESI - device may have failed\n");
- link_ok = false;
+ reprobe_needed = true;
break;
}
drm_dbg_kms(display->drm, "DPRX ESI: %4ph\n", esi);
- if (intel_dp_mst_active_streams(intel_dp) > 0 && link_ok &&
- esi[3] & LINK_STATUS_CHANGED) {
- if (!intel_dp_mst_link_status(intel_dp))
- link_ok = false;
- ack[3] |= LINK_STATUS_CHANGED;
- }
+ ack[3] |= esi[3] & INTEL_DP_LINK_SERVICE_IRQ_MASK_MST;
intel_dp_mst_hpd_irq(intel_dp, esi, ack);
- if (esi[3] & DP_TUNNELING_IRQ) {
- if (drm_dp_tunnel_handle_irq(display->dp_tunnel_mgr,
- &intel_dp->aux))
- reprobe_needed = true;
- ack[3] |= DP_TUNNELING_IRQ;
- }
+ new_irqs = !mem_is_zero(ack, sizeof(ack));
- if (mem_is_zero(ack, sizeof(ack)))
- break;
+ drm_WARN_ON(display->drm, ack[1] & ~INTEL_DP_DEVICE_SERVICE_IRQ_MASK_MST);
+ drm_WARN_ON(display->drm, ack[3] & ~INTEL_DP_LINK_SERVICE_IRQ_MASK_MST);
- if (!intel_dp_ack_sink_irq_esi(intel_dp, ack))
+ if (new_irqs && !intel_dp_ack_sink_irq_esi(intel_dp, ack))
drm_dbg_kms(display->drm, "Failed to ack ESI\n");
if (ack[1] & (DP_DOWN_REP_MSG_RDY | DP_UP_REQ_MSG_RDY))
drm_dp_mst_hpd_irq_send_new_request(&intel_dp->mst.mgr);
- }
- if (!link_ok || intel_dp->link.force_retrain)
- intel_encoder_link_check_queue_work(encoder, 0);
+ if (force_retrain) {
+ /* Defer forced retraining to the regular link status check. */
+ ack[3] |= LINK_STATUS_CHANGED;
+ force_retrain = false;
+ }
+
+ if (intel_dp_handle_link_service_irq(intel_dp, ack[3]))
+ reprobe_needed = true;
+
+ if (!new_irqs)
+ break;
+ }
return !reprobe_needed;
}
@@ -5423,6 +5648,30 @@ intel_dp_handle_hdmi_link_status_change(struct intel_dp *intel_dp)
}
}
+static int
+intel_dp_read_link_status(struct intel_dp *intel_dp, u8 link_status[DP_LINK_STATUS_SIZE])
+{
+ int err;
+
+ memset(link_status, 0, DP_LINK_STATUS_SIZE);
+
+ if (intel_dp_mst_active_streams(intel_dp) > 0)
+ err = drm_dp_dpcd_read_data(&intel_dp->aux, DP_LANE0_1_STATUS_ESI,
+ link_status, DP_LINK_STATUS_SIZE - 2);
+ else
+ err = drm_dp_dpcd_read_phy_link_status(&intel_dp->aux, DP_PHY_DPRX,
+ link_status);
+
+ if (err)
+ return err;
+
+ if (link_status[DP_LANE_ALIGN_STATUS_UPDATED - DP_LANE0_1_STATUS] &
+ DP_DOWNSTREAM_PORT_STATUS_CHANGED)
+ WRITE_ONCE(intel_dp->downstream_port_changed, true);
+
+ return 0;
+}
+
static bool
intel_dp_needs_link_retrain(struct intel_dp *intel_dp)
{
@@ -5445,8 +5694,7 @@ intel_dp_needs_link_retrain(struct intel_dp *intel_dp)
if (intel_dp->link.force_retrain)
return true;
- if (drm_dp_dpcd_read_phy_link_status(&intel_dp->aux, DP_PHY_DPRX,
- link_status) < 0)
+ if (intel_dp_read_link_status(intel_dp, link_status) < 0)
return false;
/*
@@ -5643,55 +5891,57 @@ void intel_dp_check_link_state(struct intel_dp *intel_dp)
intel_encoder_link_check_queue_work(encoder, 0);
}
-static void intel_dp_check_device_service_irq(struct intel_dp *intel_dp)
+static void intel_dp_handle_device_service_irq(struct intel_dp *intel_dp, u8 irq_mask)
{
struct intel_display *display = to_intel_display(intel_dp);
- u8 val;
- if (intel_dp->dpcd[DP_DPCD_REV] < 0x11)
- return;
-
- if (drm_dp_dpcd_readb(&intel_dp->aux,
- DP_DEVICE_SERVICE_IRQ_VECTOR, &val) != 1 || !val)
- return;
-
- drm_dp_dpcd_writeb(&intel_dp->aux, DP_DEVICE_SERVICE_IRQ_VECTOR, val);
+ drm_WARN_ON(display->drm, irq_mask & ~INTEL_DP_DEVICE_SERVICE_IRQ_MASK_SST);
- if (val & DP_AUTOMATED_TEST_REQUEST)
+ if (irq_mask & DP_AUTOMATED_TEST_REQUEST)
intel_dp_test_request(intel_dp);
- if (val & DP_CP_IRQ)
+ if (irq_mask & DP_CP_IRQ)
intel_hdcp_handle_cp_irq(intel_dp->attached_connector);
- if (val & DP_SINK_SPECIFIC_IRQ)
+ if (irq_mask & DP_SINK_SPECIFIC_IRQ)
drm_dbg_kms(display->drm, "Sink specific irq unhandled\n");
}
-static bool intel_dp_check_link_service_irq(struct intel_dp *intel_dp)
+
+/*
+ * Return %true if a full connector reprobe is required after handling a link
+ * service IRQ event.
+ */
+static bool intel_dp_handle_link_service_irq(struct intel_dp *intel_dp, u8 irq_mask)
{
struct intel_display *display = to_intel_display(intel_dp);
+ struct intel_connector *connector = intel_dp->attached_connector;
+ struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
bool reprobe_needed = false;
- u8 val;
- if (intel_dp->dpcd[DP_DPCD_REV] < 0x11)
- return false;
-
- if (drm_dp_dpcd_readb(&intel_dp->aux,
- DP_LINK_SERVICE_IRQ_VECTOR_ESI0, &val) != 1 || !val)
- return false;
+ drm_WARN_ON(display->drm, irq_mask & ~(INTEL_DP_LINK_SERVICE_IRQ_MASK_SST |
+ INTEL_DP_LINK_SERVICE_IRQ_MASK_MST));
- if ((val & DP_TUNNELING_IRQ) &&
- drm_dp_tunnel_handle_irq(display->dp_tunnel_mgr,
- &intel_dp->aux))
+ if (irq_mask & RX_CAP_CHANGED)
reprobe_needed = true;
- if (drm_dp_dpcd_writeb(&intel_dp->aux,
- DP_LINK_SERVICE_IRQ_VECTOR_ESI0, val) != 1)
- return reprobe_needed;
+ if (irq_mask & LINK_STATUS_CHANGED)
+ intel_dp_check_link_state(intel_dp);
- if (val & HDMI_LINK_STATUS_CHANGED)
+ if (irq_mask & HDMI_LINK_STATUS_CHANGED)
intel_dp_handle_hdmi_link_status_change(intel_dp);
+ if (irq_mask & CONNECTED_OFF_ENTRY_REQUESTED)
+ drm_dbg_kms(display->drm,
+ "[CONNECTOR:%d:%s][ENCODER:%d:%s] Allowing connected off request\n",
+ connector->base.base.id, connector->base.name,
+ encoder->base.base.id, encoder->base.name);
+
+ if ((irq_mask & DP_TUNNELING_IRQ) &&
+ drm_dp_tunnel_handle_irq(display->dp_tunnel_mgr,
+ &intel_dp->aux))
+ reprobe_needed = true;
+
return reprobe_needed;
}
@@ -5711,32 +5961,42 @@ static bool intel_dp_check_link_service_irq(struct intel_dp *intel_dp)
static bool
intel_dp_short_pulse(struct intel_dp *intel_dp)
{
- u8 old_sink_count = intel_dp->sink_count;
bool reprobe_needed = false;
- bool ret;
+ u8 esi[4] = {};
intel_dp_test_reset(intel_dp);
+ if (!intel_dp_get_and_ack_sink_irq_esi_sst(intel_dp, esi))
+ return false;
+
/*
- * Now read the DPCD to see if it's actually running
* If the current value of sink count doesn't match with
- * the value that was stored earlier or dpcd read failed
- * we need to do full detection
+ * the value that was stored earlier we need to do full
+ * detection.
*/
- ret = intel_dp_get_dpcd(intel_dp);
-
- if ((old_sink_count != intel_dp->sink_count) || !ret) {
+ if (intel_dp_has_sink_count(intel_dp) &&
+ DP_GET_SINK_COUNT(esi[0]) != intel_dp->sink_count)
/* No need to proceed if we are going to do full detect */
return false;
- }
- intel_dp_check_device_service_irq(intel_dp);
- reprobe_needed = intel_dp_check_link_service_irq(intel_dp);
+ intel_dp_handle_device_service_irq(intel_dp, esi[1]);
+
+ /*
+ * Force checking the link status for DPCD_REV < 1.2
+ * TODO: let the link status check depend on LINK_STATUS_CHANGED
+ * or intel_dp->link.force_retrain for DPCD_REV >= 1.2
+ */
+ esi[3] |= LINK_STATUS_CHANGED;
+ if (intel_dp_handle_link_service_irq(intel_dp, esi[3]))
+ reprobe_needed = true;
/* Handle CEC interrupts, if any */
drm_dp_cec_irq(&intel_dp->aux);
- intel_dp_check_link_state(intel_dp);
+ if (READ_ONCE(intel_dp->downstream_port_changed)) {
+ WRITE_ONCE(intel_dp->downstream_port_changed, false);
+ reprobe_needed = true;
+ }
intel_psr_short_pulse(intel_dp);
@@ -5763,6 +6023,8 @@ intel_dp_detect_dpcd(struct intel_dp *intel_dp)
if (drm_WARN_ON(display->drm, intel_dp_is_edp(intel_dp)))
return connector_status_connected;
+ WRITE_ONCE(intel_dp->downstream_port_changed, false);
+
intel_lspcon_resume(dig_port);
if (!intel_dp_get_dpcd(intel_dp))
@@ -6189,8 +6451,6 @@ intel_dp_detect(struct drm_connector *_connector,
if (intel_dp_is_edp(intel_dp) || connector->detect_edid)
status = connector_status_connected;
- intel_dp_check_device_service_irq(intel_dp);
-
out_unset_edid:
if (status != connector_status_connected && !intel_dp->is_mst)
intel_dp_unset_edid(intel_dp);
@@ -7086,6 +7346,8 @@ int intel_dp_compute_config_late(struct intel_encoder *encoder,
if (ret)
return ret;
+ intel_alpm_lobf_compute_config_late(intel_dp, crtc_state);
+
return 0;
}
@@ -7134,3 +7396,22 @@ int intel_dp_sdp_min_guardband(const struct intel_crtc_state *crtc_state,
return sdp_guardband;
}
+
+bool intel_dp_joiner_candidate_valid(struct intel_connector *connector,
+ int hdisplay,
+ int num_joined_pipes)
+{
+ struct intel_display *display = to_intel_display(connector);
+ struct intel_dp *intel_dp = intel_attached_dp(connector);
+
+ if (!intel_dp_can_join(intel_dp, num_joined_pipes))
+ return false;
+
+ if (hdisplay > num_joined_pipes * intel_dp_max_hdisplay_per_pipe(display))
+ return false;
+
+ if (connector->force_joined_pipes && connector->force_joined_pipes != num_joined_pipes)
+ return false;
+
+ return true;
+}
diff --git a/drivers/gpu/drm/i915/display/intel_dp.h b/drivers/gpu/drm/i915/display/intel_dp.h
index 25bfbfd291b0..2849b9ecdc71 100644
--- a/drivers/gpu/drm/i915/display/intel_dp.h
+++ b/drivers/gpu/drm/i915/display/intel_dp.h
@@ -75,6 +75,7 @@ int intel_dp_compute_config(struct intel_encoder *encoder,
struct drm_connector_state *conn_state);
bool intel_dp_needs_8b10b_fec(const struct intel_crtc_state *crtc_state,
bool dsc_enabled_on_crtc);
+void intel_dp_dsc_reset_config(struct intel_crtc_state *crtc_state);
int intel_dp_dsc_compute_config(struct intel_dp *intel_dp,
struct intel_crtc_state *pipe_config,
struct drm_connector_state *conn_state,
@@ -143,6 +144,8 @@ bool intel_digital_port_connected(struct intel_encoder *encoder);
bool intel_digital_port_connected_locked(struct intel_encoder *encoder);
int intel_dp_dsc_compute_max_bpp(const struct intel_connector *connector,
u8 dsc_max_bpc);
+int intel_dp_compute_min_compressed_bpp_x16(struct intel_connector *connector,
+ enum intel_output_format output_format);
bool intel_dp_mode_valid_with_dsc(struct intel_connector *connector,
int link_clock, int lane_count,
int mode_clock, int mode_hdisplay,
@@ -153,10 +156,6 @@ bool intel_dp_dsc_valid_compressed_bpp(struct intel_dp *intel_dp, int bpp_x16);
u8 intel_dp_dsc_get_slice_count(const struct intel_connector *connector,
int mode_clock, int mode_hdisplay,
int num_joined_pipes);
-int intel_dp_num_joined_pipes(struct intel_dp *intel_dp,
- struct intel_connector *connector,
- int hdisplay, int clock);
-
static inline unsigned int intel_dp_unused_lane_mask(int lane_count)
{
return ~((1 << lane_count) - 1) & 0xf;
@@ -225,5 +224,18 @@ int intel_dp_compute_config_late(struct intel_encoder *encoder,
struct drm_connector_state *conn_state);
int intel_dp_sdp_min_guardband(const struct intel_crtc_state *crtc_state,
bool assume_all_enabled);
+int intel_dp_max_hdisplay_per_pipe(struct intel_display *display);
+bool intel_dp_dotclk_valid(struct intel_display *display,
+ int target_clock,
+ int htotal,
+ int dsc_slice_count,
+ int num_joined_pipes);
+bool intel_dp_joiner_candidate_valid(struct intel_connector *connector,
+ int hdisplay,
+ int num_joined_pipes);
+
+#define for_each_joiner_candidate(__connector, __mode, __num_joined_pipes) \
+ for ((__num_joined_pipes) = 1; (__num_joined_pipes) <= (I915_MAX_PIPES); (__num_joined_pipes)++) \
+ for_each_if(intel_dp_joiner_candidate_valid(__connector, (__mode)->hdisplay, __num_joined_pipes))
#endif /* __INTEL_DP_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c b/drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c
index eb05ef4bd9f6..a7b186d0e3c4 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c
+++ b/drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c
@@ -368,6 +368,16 @@ static const char *dpcd_vs_pwm_str(bool aux)
return aux ? "DPCD" : "PWM";
}
+static const char *backlight_unit_str(struct intel_panel *panel)
+{
+ if (panel->backlight.edp.vesa.info.luminance_set)
+ return "NITS";
+ else if (panel->backlight.edp.vesa.info.aux_set)
+ return "Brightness %";
+ else
+ return "PWM";
+}
+
static void
intel_dp_aux_write_panel_luminance_override(struct intel_connector *connector)
{
@@ -459,7 +469,7 @@ static u32 intel_dp_aux_vesa_get_backlight(struct intel_connector *connector, en
return val / 1000;
}
- return connector->panel.backlight.level;
+ return panel->backlight.level;
}
static void
@@ -486,7 +496,8 @@ intel_dp_aux_vesa_enable_backlight(const struct intel_crtc_state *crtc_state,
struct intel_panel *panel = &connector->panel;
struct intel_dp *intel_dp = enc_to_intel_dp(connector->encoder);
- if (!panel->backlight.edp.vesa.info.aux_enable) {
+ if (!(panel->backlight.edp.vesa.info.aux_enable ||
+ panel->backlight.edp.vesa.info.luminance_set)) {
u32 pwm_level;
if (!panel->backlight.edp.vesa.info.aux_set)
@@ -510,7 +521,8 @@ static void intel_dp_aux_vesa_disable_backlight(const struct drm_connector_state
drm_edp_backlight_disable(&intel_dp->aux, &panel->backlight.edp.vesa.info);
- if (!panel->backlight.edp.vesa.info.aux_enable)
+ if (!(panel->backlight.edp.vesa.info.aux_enable ||
+ panel->backlight.edp.vesa.info.luminance_set))
panel->backlight.pwm_funcs->disable(old_conn_state,
intel_backlight_invert_pwm_level(connector, 0));
}
@@ -537,11 +549,14 @@ static int intel_dp_aux_vesa_setup_backlight(struct intel_connector *connector,
drm_dbg_kms(display->drm,
"[CONNECTOR:%d:%s] AUX VESA backlight enable is controlled through %s\n",
connector->base.base.id, connector->base.name,
- dpcd_vs_pwm_str(panel->backlight.edp.vesa.info.aux_enable));
+ dpcd_vs_pwm_str(panel->backlight.edp.vesa.info.aux_enable ||
+ panel->backlight.edp.vesa.info.luminance_set));
drm_dbg_kms(display->drm,
- "[CONNECTOR:%d:%s] AUX VESA backlight level is controlled through %s\n",
+ "[CONNECTOR:%d:%s] AUX VESA backlight level is controlled through %s using %s values\n",
connector->base.base.id, connector->base.name,
- dpcd_vs_pwm_str(panel->backlight.edp.vesa.info.aux_set));
+ dpcd_vs_pwm_str(panel->backlight.edp.vesa.info.aux_set ||
+ panel->backlight.edp.vesa.info.luminance_set),
+ backlight_unit_str(panel));
if (!panel->backlight.edp.vesa.info.aux_set ||
!panel->backlight.edp.vesa.info.aux_enable) {
@@ -564,9 +579,6 @@ static int intel_dp_aux_vesa_setup_backlight(struct intel_connector *connector,
}
panel->backlight.level = intel_dp_aux_vesa_get_backlight(connector, 0);
panel->backlight.enabled = panel->backlight.level != 0;
- drm_dbg_kms(display->drm,
- "[CONNECTOR:%d:%s] AUX VESA Nits backlight level is controlled through DPCD\n",
- connector->base.base.id, connector->base.name);
} else if (panel->backlight.edp.vesa.info.aux_set) {
panel->backlight.max = panel->backlight.edp.vesa.info.max;
panel->backlight.min = 0;
@@ -644,9 +656,10 @@ int intel_dp_aux_init_backlight_funcs(struct intel_connector *connector)
struct intel_dp *intel_dp = intel_attached_dp(connector);
struct drm_device *dev = connector->base.dev;
struct intel_panel *panel = &connector->panel;
- bool try_intel_interface = false, try_vesa_interface = false;
+ bool try_intel_interface = false;
- /* Check the VBT and user's module parameters to figure out which
+ /*
+ * Check the VBT and user's module parameters to figure out which
* interfaces to probe
*/
switch (display->params.enable_dpcd_backlight) {
@@ -655,7 +668,6 @@ int intel_dp_aux_init_backlight_funcs(struct intel_connector *connector)
case INTEL_DP_AUX_BACKLIGHT_AUTO:
switch (panel->vbt.backlight.type) {
case INTEL_BACKLIGHT_VESA_EDP_AUX_INTERFACE:
- try_vesa_interface = true;
break;
case INTEL_BACKLIGHT_DISPLAY_DDI:
try_intel_interface = true;
@@ -668,20 +680,12 @@ int intel_dp_aux_init_backlight_funcs(struct intel_connector *connector)
if (panel->vbt.backlight.type != INTEL_BACKLIGHT_VESA_EDP_AUX_INTERFACE)
try_intel_interface = true;
- try_vesa_interface = true;
- break;
- case INTEL_DP_AUX_BACKLIGHT_FORCE_VESA:
- try_vesa_interface = true;
break;
case INTEL_DP_AUX_BACKLIGHT_FORCE_INTEL:
try_intel_interface = true;
break;
}
- /* For eDP 1.5 and above we are supposed to use VESA interface for brightness control */
- if (intel_dp->edp_dpcd[0] >= DP_EDP_15)
- try_vesa_interface = true;
-
/*
* Since Intel has their own backlight control interface, the majority of machines out there
* using DPCD backlight controls with Intel GPUs will be using this interface as opposed to
@@ -694,16 +698,19 @@ int intel_dp_aux_init_backlight_funcs(struct intel_connector *connector)
* panel with Intel's OUI - which is also required for us to be able to detect Intel's
* backlight interface at all. This means that the only sensible way for us to detect both
* interfaces is to probe for Intel's first, and VESA's second.
+ *
+ * Also there is a chance some VBTs may advertise false Intel backlight support even if the
+ * TCON DPCD says otherwise. This means we keep VESA interface as fallback in that case.
*/
- if (try_intel_interface && intel_dp_aux_supports_hdr_backlight(connector) &&
- intel_dp->edp_dpcd[0] <= DP_EDP_14b) {
+ if (try_intel_interface && intel_dp->edp_dpcd[0] <= DP_EDP_14b &&
+ intel_dp_aux_supports_hdr_backlight(connector)) {
drm_dbg_kms(dev, "[CONNECTOR:%d:%s] Using Intel proprietary eDP backlight controls\n",
connector->base.base.id, connector->base.name);
panel->backlight.funcs = &intel_dp_hdr_bl_funcs;
return 0;
}
- if (try_vesa_interface && intel_dp_aux_supports_vesa_backlight(connector)) {
+ if (intel_dp_aux_supports_vesa_backlight(connector)) {
drm_dbg_kms(dev, "[CONNECTOR:%d:%s] Using VESA eDP backlight controls\n",
connector->base.base.id, connector->base.name);
panel->backlight.funcs = &intel_dp_vesa_bl_funcs;
diff --git a/drivers/gpu/drm/i915/display/intel_dp_mst.c b/drivers/gpu/drm/i915/display/intel_dp_mst.c
index 70a21685a3e1..887b6de14e46 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_mst.c
+++ b/drivers/gpu/drm/i915/display/intel_dp_mst.c
@@ -43,6 +43,7 @@
#include "intel_display_regs.h"
#include "intel_display_types.h"
#include "intel_display_utils.h"
+#include "intel_display_wa.h"
#include "intel_dp.h"
#include "intel_dp_hdcp.h"
#include "intel_dp_link_training.h"
@@ -595,39 +596,22 @@ mst_stream_compute_config_limits(struct intel_dp *intel_dp,
dsc);
}
-static int mst_stream_compute_config(struct intel_encoder *encoder,
- struct intel_crtc_state *pipe_config,
- struct drm_connector_state *conn_state)
+static int mst_stream_compute_link_for_joined_pipes(struct intel_encoder *encoder,
+ struct intel_crtc_state *pipe_config,
+ struct drm_connector_state *conn_state,
+ int num_joined_pipes)
{
struct intel_display *display = to_intel_display(encoder);
- struct intel_atomic_state *state = to_intel_atomic_state(conn_state->state);
- struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
struct intel_dp *intel_dp = to_primary_dp(encoder);
- struct intel_connector *connector =
- to_intel_connector(conn_state->connector);
const struct drm_display_mode *adjusted_mode =
&pipe_config->hw.adjusted_mode;
+ struct intel_connector *connector =
+ to_intel_connector(conn_state->connector);
struct link_config_limits limits;
bool dsc_needed, joiner_needs_dsc;
- int num_joined_pipes;
int ret = 0;
- if (pipe_config->fec_enable &&
- !intel_dp_supports_fec(intel_dp, connector, pipe_config))
- return -EINVAL;
-
- if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)
- return -EINVAL;
-
- num_joined_pipes = intel_dp_num_joined_pipes(intel_dp, connector,
- adjusted_mode->crtc_hdisplay,
- adjusted_mode->crtc_clock);
- if (num_joined_pipes > 1)
- pipe_config->joiner_pipes = GENMASK(crtc->pipe + num_joined_pipes - 1, crtc->pipe);
-
- pipe_config->sink_format = INTEL_OUTPUT_FORMAT_RGB;
- pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
- pipe_config->has_pch_encoder = false;
+ intel_dp_dsc_reset_config(pipe_config);
joiner_needs_dsc = intel_dp_joiner_needs_dsc(display, num_joined_pipes);
@@ -642,7 +626,12 @@ static int mst_stream_compute_config(struct intel_encoder *encoder,
if (ret == -EDEADLK)
return ret;
- if (ret)
+ if (ret ||
+ !intel_dp_dotclk_valid(display,
+ adjusted_mode->clock,
+ adjusted_mode->htotal,
+ 0,
+ num_joined_pipes))
dsc_needed = true;
}
@@ -653,6 +642,8 @@ static int mst_stream_compute_config(struct intel_encoder *encoder,
/* enable compression if the mode doesn't fit available BW */
if (dsc_needed) {
+ int dsc_slice_count;
+
drm_dbg_kms(display->drm, "Try DSC (fallback=%s, joiner=%s, force=%s)\n",
str_yes_no(ret), str_yes_no(joiner_needs_dsc),
str_yes_no(intel_dp->force_dsc_en));
@@ -683,6 +674,66 @@ static int mst_stream_compute_config(struct intel_encoder *encoder,
ret = intel_dp_dsc_compute_config(intel_dp, pipe_config,
conn_state, &limits,
pipe_config->dp_m_n.tu);
+ if (ret)
+ return ret;
+
+ dsc_slice_count = intel_dp_mst_dsc_get_slice_count(connector, pipe_config);
+
+ if (!intel_dp_dotclk_valid(display,
+ adjusted_mode->clock,
+ adjusted_mode->htotal,
+ dsc_slice_count,
+ num_joined_pipes))
+ return -EINVAL;
+ }
+
+ if (ret)
+ return ret;
+
+ ret = intel_dp_compute_min_hblank(pipe_config, conn_state);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int mst_stream_compute_config(struct intel_encoder *encoder,
+ struct intel_crtc_state *pipe_config,
+ struct drm_connector_state *conn_state)
+{
+ struct intel_display *display = to_intel_display(encoder);
+ struct intel_atomic_state *state = to_intel_atomic_state(conn_state->state);
+ struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
+ struct intel_dp *intel_dp = to_primary_dp(encoder);
+ struct intel_connector *connector =
+ to_intel_connector(conn_state->connector);
+ const struct drm_display_mode *adjusted_mode =
+ &pipe_config->hw.adjusted_mode;
+ int num_joined_pipes;
+ int ret = -EINVAL;
+
+ if (pipe_config->fec_enable &&
+ !intel_dp_supports_fec(intel_dp, connector, pipe_config))
+ return -EINVAL;
+
+ if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)
+ return -EINVAL;
+
+ pipe_config->sink_format = INTEL_OUTPUT_FORMAT_RGB;
+ pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
+ pipe_config->has_pch_encoder = false;
+
+ for_each_joiner_candidate(connector, adjusted_mode, num_joined_pipes) {
+ if (num_joined_pipes > 1)
+ pipe_config->joiner_pipes = GENMASK(crtc->pipe + num_joined_pipes - 1,
+ crtc->pipe);
+
+ ret = mst_stream_compute_link_for_joined_pipes(encoder,
+ pipe_config,
+ conn_state,
+ num_joined_pipes);
+ if (ret == 0 || ret == -EDEADLK)
+ break;
}
if (ret)
@@ -695,10 +746,6 @@ static int mst_stream_compute_config(struct intel_encoder *encoder,
pipe_config->lane_lat_optim_mask =
bxt_dpio_phy_calc_lane_lat_optim_mask(pipe_config->lane_count);
- ret = intel_dp_compute_min_hblank(pipe_config, conn_state);
- if (ret)
- return ret;
-
intel_vrr_compute_config(pipe_config, conn_state);
intel_dp_audio_compute_config(encoder, pipe_config, conn_state);
@@ -1230,7 +1277,7 @@ static void enable_bs_jitter_was(const struct intel_crtc_state *crtc_state)
set |= DP_MST_FEC_BS_JITTER_WA(crtc_state->cpu_transcoder);
/* Wa_14014143976:adlp */
- if (IS_DISPLAY_STEP(display, STEP_E0, STEP_FOREVER)) {
+ if (intel_display_wa(display, INTEL_DISPLAY_WA_14014143976)) {
if (intel_dp_is_uhbr(crtc_state))
set |= DP_MST_SHORT_HBLANK_WA(crtc_state->cpu_transcoder);
else if (crtc_state->fec_enable)
@@ -1419,11 +1466,11 @@ mst_connector_mode_valid_ctx(struct drm_connector *_connector,
struct intel_dp *intel_dp = connector->mst.dp;
struct drm_dp_mst_topology_mgr *mgr = &intel_dp->mst.mgr;
struct drm_dp_mst_port *port = connector->mst.port;
- const int min_bpp = 18;
- int max_dotclk = display->cdclk.max_dotclk_freq;
int max_rate, mode_rate, max_lanes, max_link_clock;
unsigned long bw_overhead_flags =
DRM_DP_BW_OVERHEAD_MST | DRM_DP_BW_OVERHEAD_SSC_REF_CLK;
+ int min_link_bpp_x16 = fxp_q4_from_int(18);
+ static bool supports_dsc;
int ret;
bool dsc = false;
int target_clock = mode->clock;
@@ -1448,6 +1495,13 @@ mst_connector_mode_valid_ctx(struct drm_connector *_connector,
return 0;
}
+ supports_dsc = intel_dp_has_dsc(connector) &&
+ drm_dp_sink_supports_fec(connector->dp.fec_capability);
+
+ if (supports_dsc && connector->mst.port->passthrough_aux)
+ min_link_bpp_x16 = intel_dp_compute_min_compressed_bpp_x16(connector,
+ INTEL_OUTPUT_FORMAT_RGB);
+
max_link_clock = intel_dp_max_link_rate(intel_dp);
max_lanes = intel_dp_max_lane_count(intel_dp);
@@ -1455,12 +1509,19 @@ mst_connector_mode_valid_ctx(struct drm_connector *_connector,
max_link_clock, max_lanes);
mode_rate = intel_dp_link_required(max_link_clock, max_lanes,
mode->clock, mode->hdisplay,
- fxp_q4_from_int(min_bpp),
+ min_link_bpp_x16,
bw_overhead_flags);
/*
* TODO:
* - Also check if compression would allow for the mode
+ * in non-passthrough mode, i.e. the last branch device
+ * decompressing the stream. This makes a difference only if
+ * the BW on the link between the last branch device and the
+ * sink is higher than the BW on the whole MST path from the
+ * source to the last branch device. Relying on the extra BW
+ * this provides also requires the
+ * DFP_Link_Available_Payload_Bandwidth_Number described below.
* - Calculate the overhead using drm_dp_bw_overhead() /
* drm_dp_bw_channel_coding_efficiency(), similarly to the
* compute config code, as drm_dp_calc_pbn_mode() doesn't
@@ -1470,49 +1531,73 @@ mst_connector_mode_valid_ctx(struct drm_connector *_connector,
* corresponding link capabilities of the sink) in case the
* stream is uncompressed for it by the last branch device.
*/
- num_joined_pipes = intel_dp_num_joined_pipes(intel_dp, connector,
- mode->hdisplay, target_clock);
- max_dotclk *= num_joined_pipes;
-
ret = drm_modeset_lock(&mgr->base.lock, ctx);
if (ret)
return ret;
- if (mode_rate > max_rate || mode->clock > max_dotclk ||
- drm_dp_calc_pbn_mode(mode->clock, min_bpp << 4) > port->full_pbn) {
+ if (mode_rate > max_rate ||
+ drm_dp_calc_pbn_mode(mode->clock, min_link_bpp_x16) > port->full_pbn) {
*status = MODE_CLOCK_HIGH;
return 0;
}
- if (intel_dp_has_dsc(connector) && drm_dp_sink_supports_fec(connector->dp.fec_capability)) {
- /*
- * TBD pass the connector BPC,
- * for now U8_MAX so that max BPC on that platform would be picked
- */
- int pipe_bpp = intel_dp_dsc_compute_max_bpp(connector, U8_MAX);
+ *status = MODE_CLOCK_HIGH;
+ for_each_joiner_candidate(connector, mode, num_joined_pipes) {
+ int dsc_slice_count = 0;
- if (!drm_dp_is_uhbr_rate(max_link_clock))
- bw_overhead_flags |= DRM_DP_BW_OVERHEAD_FEC;
+ if (supports_dsc) {
+ /*
+ * TBD pass the connector BPC,
+ * for now U8_MAX so that max BPC on that platform would be picked
+ */
+ int pipe_bpp = intel_dp_dsc_compute_max_bpp(connector, U8_MAX);
+
+ dsc_slice_count = intel_dp_dsc_get_slice_count(connector,
+ mode->clock,
+ mode->hdisplay,
+ num_joined_pipes);
+
+ if (!drm_dp_is_uhbr_rate(max_link_clock))
+ bw_overhead_flags |= DRM_DP_BW_OVERHEAD_FEC;
+
+ dsc = intel_dp_mode_valid_with_dsc(connector,
+ max_link_clock, max_lanes,
+ target_clock, mode->hdisplay,
+ num_joined_pipes,
+ INTEL_OUTPUT_FORMAT_RGB, pipe_bpp,
+ bw_overhead_flags);
+ }
- dsc = intel_dp_mode_valid_with_dsc(connector,
- max_link_clock, max_lanes,
- target_clock, mode->hdisplay,
- num_joined_pipes,
- INTEL_OUTPUT_FORMAT_RGB, pipe_bpp,
- bw_overhead_flags);
- }
+ if (intel_dp_joiner_needs_dsc(display, num_joined_pipes) && !dsc) {
+ *status = MODE_CLOCK_HIGH;
+ continue;
+ }
- if (intel_dp_joiner_needs_dsc(display, num_joined_pipes) && !dsc) {
- *status = MODE_CLOCK_HIGH;
- return 0;
- }
+ if (mode_rate > max_rate && !dsc) {
+ *status = MODE_CLOCK_HIGH;
+ continue;
+ }
- if (mode_rate > max_rate && !dsc) {
- *status = MODE_CLOCK_HIGH;
- return 0;
+ *status = intel_mode_valid_max_plane_size(display, mode, num_joined_pipes);
+
+ if (*status != MODE_OK)
+ continue;
+
+ if (!dsc)
+ dsc_slice_count = 0;
+
+ if (!intel_dp_dotclk_valid(display,
+ mode->clock,
+ mode->htotal,
+ dsc_slice_count,
+ num_joined_pipes)) {
+ *status = MODE_CLOCK_HIGH;
+ continue;
+ }
+
+ break;
}
- *status = intel_mode_valid_max_plane_size(display, mode, num_joined_pipes);
return 0;
}
diff --git a/drivers/gpu/drm/i915/display/intel_dp_tunnel.c b/drivers/gpu/drm/i915/display/intel_dp_tunnel.c
index 83865c02d477..1fd1ac8d556d 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_tunnel.c
+++ b/drivers/gpu/drm/i915/display/intel_dp_tunnel.c
@@ -54,31 +54,20 @@ static int kbytes_to_mbits(int kbytes)
return DIV_ROUND_UP(kbytes * 8, 1000);
}
-static int get_current_link_bw(struct intel_dp *intel_dp,
- bool *below_dprx_bw)
+static int get_current_link_bw(struct intel_dp *intel_dp)
{
int rate = intel_dp_max_common_rate(intel_dp);
int lane_count = intel_dp_max_common_lane_count(intel_dp);
- int bw;
- bw = intel_dp_max_link_data_rate(intel_dp, rate, lane_count);
- *below_dprx_bw = bw < drm_dp_max_dprx_data_rate(rate, lane_count);
-
- return bw;
+ return intel_dp_max_link_data_rate(intel_dp, rate, lane_count);
}
-static int update_tunnel_state(struct intel_dp *intel_dp)
+static int __update_tunnel_state(struct intel_dp *intel_dp, bool force_sink_update)
{
struct intel_display *display = to_intel_display(intel_dp);
struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
- bool old_bw_below_dprx;
- bool new_bw_below_dprx;
- int old_bw;
- int new_bw;
int ret;
- old_bw = get_current_link_bw(intel_dp, &old_bw_below_dprx);
-
ret = drm_dp_tunnel_update_state(intel_dp->tunnel);
if (ret < 0) {
drm_dbg_kms(display->drm,
@@ -90,18 +79,26 @@ static int update_tunnel_state(struct intel_dp *intel_dp)
return ret;
}
- if (ret == 0 ||
- !drm_dp_tunnel_bw_alloc_is_enabled(intel_dp->tunnel))
+ if (!force_sink_update &&
+ (ret == 0 || !drm_dp_tunnel_bw_alloc_is_enabled(intel_dp->tunnel)))
return 0;
intel_dp_update_sink_caps(intel_dp);
- new_bw = get_current_link_bw(intel_dp, &new_bw_below_dprx);
+ return 0;
+}
+
+static bool has_tunnel_bw_changed(struct intel_dp *intel_dp, int old_bw)
+{
+ struct intel_display *display = to_intel_display(intel_dp);
+ struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
+ int new_bw;
+
+ new_bw = get_current_link_bw(intel_dp);
/* Suppress the notification if the mode list can't change due to bw. */
- if (old_bw_below_dprx == new_bw_below_dprx &&
- !new_bw_below_dprx)
- return 0;
+ if (old_bw == new_bw)
+ return false;
drm_dbg_kms(display->drm,
"[DPTUN %s][ENCODER:%d:%s] Notify users about BW change: %d -> %d\n",
@@ -109,7 +106,29 @@ static int update_tunnel_state(struct intel_dp *intel_dp)
encoder->base.base.id, encoder->base.name,
kbytes_to_mbits(old_bw), kbytes_to_mbits(new_bw));
- return 1;
+ return true;
+}
+
+/*
+ * Returns:
+ * - 0 in case of success - if there wasn't any change in the tunnel state
+ * requiring a user notification
+ * - 1 in case of success - if there was a change in the tunnel state
+ * requiring a user notification
+ * - Negative error code if updating the tunnel state failed
+ */
+static int update_tunnel_state(struct intel_dp *intel_dp)
+{
+ int old_bw;
+ int err;
+
+ old_bw = get_current_link_bw(intel_dp);
+
+ err = __update_tunnel_state(intel_dp, false);
+ if (err)
+ return err;
+
+ return has_tunnel_bw_changed(intel_dp, old_bw) ? 1 : 0;
}
/*
@@ -150,11 +169,9 @@ static int allocate_initial_tunnel_bw_for_pipes(struct intel_dp *intel_dp, u8 pi
drm_dp_tunnel_name(intel_dp->tunnel),
encoder->base.base.id, encoder->base.name,
ERR_PTR(err));
-
- return err;
}
- return update_tunnel_state(intel_dp);
+ return err;
}
static int allocate_initial_tunnel_bw(struct intel_dp *intel_dp,
@@ -170,13 +187,24 @@ static int allocate_initial_tunnel_bw(struct intel_dp *intel_dp,
return allocate_initial_tunnel_bw_for_pipes(intel_dp, pipe_mask);
}
+/*
+ * Returns:
+ * - 0 in case of success - after any tunnel detected and added to @intel_dp
+ * - 1 in case of success - after a tunnel detected and added to @intel_dp,
+ * where the link BW via the tunnel changed in a way requiring a user
+ * notification
+ * - Negative error code if the tunnel detection failed
+ */
static int detect_new_tunnel(struct intel_dp *intel_dp, struct drm_modeset_acquire_ctx *ctx)
{
struct intel_display *display = to_intel_display(intel_dp);
struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
struct drm_dp_tunnel *tunnel;
+ int old_bw;
int ret;
+ old_bw = get_current_link_bw(intel_dp);
+
tunnel = drm_dp_tunnel_detect(display->dp_tunnel_mgr,
&intel_dp->aux);
if (IS_ERR(tunnel))
@@ -200,10 +228,17 @@ static int detect_new_tunnel(struct intel_dp *intel_dp, struct drm_modeset_acqui
}
ret = allocate_initial_tunnel_bw(intel_dp, ctx);
- if (ret < 0)
+ if (ret < 0) {
intel_dp_tunnel_destroy(intel_dp);
- return ret;
+ return ret;
+ }
+
+ ret = __update_tunnel_state(intel_dp, true);
+ if (ret)
+ return ret;
+
+ return has_tunnel_bw_changed(intel_dp, old_bw) ? 1 : 0;
}
/**
@@ -221,9 +256,12 @@ static int detect_new_tunnel(struct intel_dp *intel_dp, struct drm_modeset_acqui
* tunnel. If the tunnel's state change requires this - for instance the
* tunnel's group ID has changed - the tunnel will be dropped and recreated.
*
- * Return 0 in case of success - after any tunnel detected and added to
- * @intel_dp - 1 in case the BW on an already existing tunnel has changed in a
- * way that requires notifying user space.
+ * Returns:
+ * - 0 in case of success - after any tunnel detected and added to @intel_dp
+ * - 1 in case the link BW via the new or an already existing tunnel has changed
+ * in a way that requires notifying user space
+ * - Negative error code, if creating a new tunnel or updating the tunnel
+ * state failed
*/
int intel_dp_tunnel_detect(struct intel_dp *intel_dp, struct drm_modeset_acquire_ctx *ctx)
{
diff --git a/drivers/gpu/drm/i915/display/intel_dpll.c b/drivers/gpu/drm/i915/display/intel_dpll.c
index a4f372c9e6fc..8433e3ff0319 100644
--- a/drivers/gpu/drm/i915/display/intel_dpll.c
+++ b/drivers/gpu/drm/i915/display/intel_dpll.c
@@ -1219,6 +1219,7 @@ static int xe3plpd_crtc_compute_clock(struct intel_atomic_state *state,
intel_atomic_get_new_crtc_state(state, crtc);
struct intel_encoder *encoder =
intel_get_crtc_new_encoder(state, crtc_state);
+ struct intel_display *display = to_intel_display(encoder);
int ret;
ret = intel_lt_phy_pll_calc_state(crtc_state, encoder);
@@ -1227,7 +1228,7 @@ static int xe3plpd_crtc_compute_clock(struct intel_atomic_state *state,
/* TODO: Do the readback via intel_compute_shared_dplls() */
crtc_state->port_clock =
- intel_lt_phy_calc_port_clock(encoder, crtc_state);
+ intel_lt_phy_calc_port_clock(display, &crtc_state->dpll_hw_state.ltpll);
crtc_state->hw.adjusted_mode.crtc_clock = intel_crtc_dotclock(crtc_state);
@@ -2333,3 +2334,8 @@ void assert_pll_disabled(struct intel_display *display, enum pipe pipe)
{
assert_pll(display, pipe, false);
}
+
+bool intel_dpll_clock_matches(int clock1, int clock2)
+{
+ return abs(clock1 - clock2) <= 1;
+}
diff --git a/drivers/gpu/drm/i915/display/intel_dpll.h b/drivers/gpu/drm/i915/display/intel_dpll.h
index 3444a2dd3166..8cd0d17e974e 100644
--- a/drivers/gpu/drm/i915/display/intel_dpll.h
+++ b/drivers/gpu/drm/i915/display/intel_dpll.h
@@ -48,5 +48,6 @@ void chv_crtc_clock_get(struct intel_crtc_state *crtc_state);
void assert_pll_enabled(struct intel_display *display, enum pipe pipe);
void assert_pll_disabled(struct intel_display *display, enum pipe pipe);
+bool intel_dpll_clock_matches(int clock1, int clock2);
#endif
diff --git a/drivers/gpu/drm/i915/display/intel_dpll_mgr.c b/drivers/gpu/drm/i915/display/intel_dpll_mgr.c
index 9aa84a430f09..f35a9252f4e1 100644
--- a/drivers/gpu/drm/i915/display/intel_dpll_mgr.c
+++ b/drivers/gpu/drm/i915/display/intel_dpll_mgr.c
@@ -38,6 +38,7 @@
#include "intel_dpll.h"
#include "intel_dpll_mgr.h"
#include "intel_hti.h"
+#include "intel_lt_phy.h"
#include "intel_mg_phy_regs.h"
#include "intel_pch_refclk.h"
#include "intel_step.h"
@@ -4613,7 +4614,7 @@ void intel_dpll_init(struct intel_display *display)
dpll_mgr = &pch_pll_mgr;
if (!dpll_mgr)
- return;
+ goto out_verify;
dpll_info = dpll_mgr->dpll_info;
@@ -4632,6 +4633,14 @@ void intel_dpll_init(struct intel_display *display)
display->dpll.mgr = dpll_mgr;
display->dpll.num_dpll = i;
+
+out_verify:
+ /*
+ * TODO: Convert these to a KUnit test or dependent on a kconfig
+ * debug option.
+ */
+ intel_cx0pll_verify_plls(display);
+ intel_lt_phy_verify_plls(display);
}
/**
diff --git a/drivers/gpu/drm/i915/display/intel_dpll_mgr.h b/drivers/gpu/drm/i915/display/intel_dpll_mgr.h
index 5b71c860515f..4cc14ce5eebe 100644
--- a/drivers/gpu/drm/i915/display/intel_dpll_mgr.h
+++ b/drivers/gpu/drm/i915/display/intel_dpll_mgr.h
@@ -241,14 +241,12 @@ struct intel_mpllb_state {
};
struct intel_c10pll_state {
- u32 clock; /* in KHz */
u8 tx;
u8 cmn;
u8 pll[20];
};
struct intel_c20pll_state {
- u32 clock; /* in kHz */
u16 tx[3];
u16 cmn[4];
union {
@@ -274,7 +272,6 @@ struct intel_cx0pll_state {
};
struct intel_lt_phy_pll_state {
- u32 clock; /* in kHz */
u8 addr_msb[13];
u8 addr_lsb[13];
u8 data[13][4];
diff --git a/drivers/gpu/drm/i915/display/intel_dpt.c b/drivers/gpu/drm/i915/display/intel_dpt.c
index da472371c7d7..145dc9511116 100644
--- a/drivers/gpu/drm/i915/display/intel_dpt.c
+++ b/drivers/gpu/drm/i915/display/intel_dpt.c
@@ -1,202 +1,51 @@
// SPDX-License-Identifier: MIT
/*
- * Copyright © 2021 Intel Corporation
+ * Copyright © 2023 Intel Corporation
*/
-#include <drm/drm_print.h>
-
-#include "gem/i915_gem_domain.h"
-#include "gem/i915_gem_internal.h"
-#include "gem/i915_gem_lmem.h"
-#include "gt/gen8_ppgtt.h"
-
-#include "i915_drv.h"
-#include "intel_display_core.h"
-#include "intel_display_rpm.h"
+#include "intel_de.h"
+#include "intel_display_regs.h"
#include "intel_display_types.h"
#include "intel_dpt.h"
-#include "intel_fb.h"
-
-struct i915_dpt {
- struct i915_address_space vm;
-
- struct drm_i915_gem_object *obj;
- struct i915_vma *vma;
- void __iomem *iomem;
-};
-
-#define i915_is_dpt(vm) ((vm)->is_dpt)
-
-static inline struct i915_dpt *
-i915_vm_to_dpt(struct i915_address_space *vm)
-{
- BUILD_BUG_ON(offsetof(struct i915_dpt, vm));
- drm_WARN_ON(&vm->i915->drm, !i915_is_dpt(vm));
- return container_of(vm, struct i915_dpt, vm);
-}
-
-static void gen8_set_pte(void __iomem *addr, gen8_pte_t pte)
-{
- writeq(pte, addr);
-}
-
-static void dpt_insert_page(struct i915_address_space *vm,
- dma_addr_t addr,
- u64 offset,
- unsigned int pat_index,
- u32 flags)
-{
- struct i915_dpt *dpt = i915_vm_to_dpt(vm);
- gen8_pte_t __iomem *base = dpt->iomem;
-
- gen8_set_pte(base + offset / I915_GTT_PAGE_SIZE,
- vm->pte_encode(addr, pat_index, flags));
-}
-
-static void dpt_insert_entries(struct i915_address_space *vm,
- struct i915_vma_resource *vma_res,
- unsigned int pat_index,
- u32 flags)
-{
- struct i915_dpt *dpt = i915_vm_to_dpt(vm);
- gen8_pte_t __iomem *base = dpt->iomem;
- const gen8_pte_t pte_encode = vm->pte_encode(0, pat_index, flags);
- struct sgt_iter sgt_iter;
- dma_addr_t addr;
- int i;
-
- /*
- * Note that we ignore PTE_READ_ONLY here. The caller must be careful
- * not to allow the user to override access to a read only page.
- */
-
- i = vma_res->start / I915_GTT_PAGE_SIZE;
- for_each_sgt_daddr(addr, sgt_iter, vma_res->bi.pages)
- gen8_set_pte(&base[i++], pte_encode | addr);
-}
-
-static void dpt_clear_range(struct i915_address_space *vm,
- u64 start, u64 length)
-{
-}
-
-static void dpt_bind_vma(struct i915_address_space *vm,
- struct i915_vm_pt_stash *stash,
- struct i915_vma_resource *vma_res,
- unsigned int pat_index,
- u32 flags)
-{
- u32 pte_flags;
-
- if (vma_res->bound_flags)
- return;
-
- /* Applicable to VLV (gen8+ do not support RO in the GGTT) */
- pte_flags = 0;
- if (vm->has_read_only && vma_res->bi.readonly)
- pte_flags |= PTE_READ_ONLY;
- if (vma_res->bi.lmem)
- pte_flags |= PTE_LM;
+#include "intel_parent.h"
+#include "skl_universal_plane_regs.h"
- vm->insert_entries(vm, vma_res, pat_index, pte_flags);
-
- vma_res->page_sizes_gtt = I915_GTT_PAGE_SIZE;
-
- /*
- * Without aliasing PPGTT there's no difference between
- * GLOBAL/LOCAL_BIND, it's all the same ptes. Hence unconditionally
- * upgrade to both bound if we bind either to avoid double-binding.
- */
- vma_res->bound_flags = I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND;
-}
-
-static void dpt_unbind_vma(struct i915_address_space *vm,
- struct i915_vma_resource *vma_res)
-{
- vm->clear_range(vm, vma_res->start, vma_res->vma_size);
-}
-
-static void dpt_cleanup(struct i915_address_space *vm)
+void intel_dpt_configure(struct intel_crtc *crtc)
{
- struct i915_dpt *dpt = i915_vm_to_dpt(vm);
-
- i915_gem_object_put(dpt->obj);
-}
-
-struct i915_vma *intel_dpt_pin_to_ggtt(struct i915_address_space *vm,
- unsigned int alignment)
-{
- struct drm_i915_private *i915 = vm->i915;
- struct intel_display *display = i915->display;
- struct i915_dpt *dpt = i915_vm_to_dpt(vm);
- struct ref_tracker *wakeref;
- struct i915_vma *vma;
- void __iomem *iomem;
- struct i915_gem_ww_ctx ww;
- u64 pin_flags = 0;
- int err;
-
- if (i915_gem_object_is_stolen(dpt->obj))
- pin_flags |= PIN_MAPPABLE;
-
- wakeref = intel_display_rpm_get(display);
- atomic_inc(&display->restore.pending_fb_pin);
-
- for_i915_gem_ww(&ww, err, true) {
- err = i915_gem_object_lock(dpt->obj, &ww);
- if (err)
- continue;
+ struct intel_display *display = to_intel_display(crtc);
- vma = i915_gem_object_ggtt_pin_ww(dpt->obj, &ww, NULL, 0,
- alignment, pin_flags);
- if (IS_ERR(vma)) {
- err = PTR_ERR(vma);
- continue;
- }
+ if (DISPLAY_VER(display) == 14) {
+ enum pipe pipe = crtc->pipe;
+ enum plane_id plane_id;
- iomem = i915_vma_pin_iomap(vma);
- i915_vma_unpin(vma);
+ for_each_plane_id_on_crtc(crtc, plane_id) {
+ if (plane_id == PLANE_CURSOR)
+ continue;
- if (IS_ERR(iomem)) {
- err = PTR_ERR(iomem);
- continue;
+ intel_de_rmw(display, PLANE_CHICKEN(pipe, plane_id),
+ PLANE_CHICKEN_DISABLE_DPT,
+ display->params.enable_dpt ? 0 :
+ PLANE_CHICKEN_DISABLE_DPT);
}
-
- dpt->vma = vma;
- dpt->iomem = iomem;
-
- i915_vma_get(vma);
+ } else if (DISPLAY_VER(display) == 13) {
+ intel_de_rmw(display, CHICKEN_MISC_2,
+ CHICKEN_MISC_DISABLE_DPT,
+ display->params.enable_dpt ? 0 :
+ CHICKEN_MISC_DISABLE_DPT);
}
-
- dpt->obj->mm.dirty = true;
-
- atomic_dec(&display->restore.pending_fb_pin);
- intel_display_rpm_put(display, wakeref);
-
- return err ? ERR_PTR(err) : vma;
-}
-
-void intel_dpt_unpin_from_ggtt(struct i915_address_space *vm)
-{
- struct i915_dpt *dpt = i915_vm_to_dpt(vm);
-
- i915_vma_unpin_iomap(dpt->vma);
- i915_vma_put(dpt->vma);
}
/**
- * intel_dpt_resume - restore the memory mapping for all DPT FBs during system resume
+ * intel_dpt_suspend - suspend the memory mapping for all DPT FBs during system suspend
* @display: display device instance
*
- * Restore the memory mapping during system resume for all framebuffers which
- * are mapped to HW via a GGTT->DPT page table. The content of these page
- * tables are not stored in the hibernation image during S4 and S3RST->S4
- * transitions, so here we reprogram the PTE entries in those tables.
+ * Suspend the memory mapping during system suspend for all framebuffers which
+ * are mapped to HW via a GGTT->DPT page table.
*
- * This function must be called after the mappings in GGTT have been restored calling
- * i915_ggtt_resume().
+ * This function must be called before the mappings in GGTT are suspended calling
+ * i915_ggtt_suspend().
*/
-void intel_dpt_resume(struct intel_display *display)
+void intel_dpt_suspend(struct intel_display *display)
{
struct drm_framebuffer *drm_fb;
@@ -204,26 +53,30 @@ void intel_dpt_resume(struct intel_display *display)
return;
mutex_lock(&display->drm->mode_config.fb_lock);
+
drm_for_each_fb(drm_fb, display->drm) {
struct intel_framebuffer *fb = to_intel_framebuffer(drm_fb);
- if (fb->dpt_vm)
- i915_ggtt_resume_vm(fb->dpt_vm, true);
+ if (fb->dpt)
+ intel_parent_dpt_suspend(display, fb->dpt);
}
+
mutex_unlock(&display->drm->mode_config.fb_lock);
}
/**
- * intel_dpt_suspend - suspend the memory mapping for all DPT FBs during system suspend
+ * intel_dpt_resume - restore the memory mapping for all DPT FBs during system resume
* @display: display device instance
*
- * Suspend the memory mapping during system suspend for all framebuffers which
- * are mapped to HW via a GGTT->DPT page table.
+ * Restore the memory mapping during system resume for all framebuffers which
+ * are mapped to HW via a GGTT->DPT page table. The content of these page
+ * tables are not stored in the hibernation image during S4 and S3RST->S4
+ * transitions, so here we reprogram the PTE entries in those tables.
*
- * This function must be called before the mappings in GGTT are suspended calling
- * i915_ggtt_suspend().
+ * This function must be called after the mappings in GGTT have been restored calling
+ * i915_ggtt_resume().
*/
-void intel_dpt_suspend(struct intel_display *display)
+void intel_dpt_resume(struct intel_display *display)
{
struct drm_framebuffer *drm_fb;
@@ -231,96 +84,11 @@ void intel_dpt_suspend(struct intel_display *display)
return;
mutex_lock(&display->drm->mode_config.fb_lock);
-
drm_for_each_fb(drm_fb, display->drm) {
struct intel_framebuffer *fb = to_intel_framebuffer(drm_fb);
- if (fb->dpt_vm)
- i915_ggtt_suspend_vm(fb->dpt_vm, true);
+ if (fb->dpt)
+ intel_parent_dpt_resume(display, fb->dpt);
}
-
mutex_unlock(&display->drm->mode_config.fb_lock);
}
-
-struct i915_address_space *
-intel_dpt_create(struct intel_framebuffer *fb)
-{
- struct drm_gem_object *obj = intel_fb_bo(&fb->base);
- struct drm_i915_private *i915 = to_i915(obj->dev);
- struct drm_i915_gem_object *dpt_obj;
- struct i915_address_space *vm;
- struct i915_dpt *dpt;
- size_t size;
- int ret;
-
- if (intel_fb_needs_pot_stride_remap(fb))
- size = intel_remapped_info_size(&fb->remapped_view.gtt.remapped);
- else
- size = DIV_ROUND_UP_ULL(obj->size, I915_GTT_PAGE_SIZE);
-
- size = round_up(size * sizeof(gen8_pte_t), I915_GTT_PAGE_SIZE);
-
- dpt_obj = i915_gem_object_create_lmem(i915, size, I915_BO_ALLOC_CONTIGUOUS);
- if (IS_ERR(dpt_obj) && i915_ggtt_has_aperture(to_gt(i915)->ggtt))
- dpt_obj = i915_gem_object_create_stolen(i915, size);
- if (IS_ERR(dpt_obj) && !HAS_LMEM(i915)) {
- drm_dbg_kms(&i915->drm, "Allocating dpt from smem\n");
- dpt_obj = i915_gem_object_create_shmem(i915, size);
- }
- if (IS_ERR(dpt_obj))
- return ERR_CAST(dpt_obj);
-
- ret = i915_gem_object_lock_interruptible(dpt_obj, NULL);
- if (!ret) {
- ret = i915_gem_object_set_cache_level(dpt_obj, I915_CACHE_NONE);
- i915_gem_object_unlock(dpt_obj);
- }
- if (ret) {
- i915_gem_object_put(dpt_obj);
- return ERR_PTR(ret);
- }
-
- dpt = kzalloc_obj(*dpt);
- if (!dpt) {
- i915_gem_object_put(dpt_obj);
- return ERR_PTR(-ENOMEM);
- }
-
- vm = &dpt->vm;
-
- vm->gt = to_gt(i915);
- vm->i915 = i915;
- vm->dma = i915->drm.dev;
- vm->total = (size / sizeof(gen8_pte_t)) * I915_GTT_PAGE_SIZE;
- vm->is_dpt = true;
-
- i915_address_space_init(vm, VM_CLASS_DPT);
-
- vm->insert_page = dpt_insert_page;
- vm->clear_range = dpt_clear_range;
- vm->insert_entries = dpt_insert_entries;
- vm->cleanup = dpt_cleanup;
-
- vm->vma_ops.bind_vma = dpt_bind_vma;
- vm->vma_ops.unbind_vma = dpt_unbind_vma;
-
- vm->pte_encode = vm->gt->ggtt->vm.pte_encode;
-
- dpt->obj = dpt_obj;
- dpt->obj->is_dpt = true;
-
- return &dpt->vm;
-}
-
-void intel_dpt_destroy(struct i915_address_space *vm)
-{
- struct i915_dpt *dpt = i915_vm_to_dpt(vm);
-
- dpt->obj->is_dpt = false;
- i915_vm_put(&dpt->vm);
-}
-
-u64 intel_dpt_offset(struct i915_vma *dpt_vma)
-{
- return i915_vma_offset(dpt_vma);
-}
diff --git a/drivers/gpu/drm/i915/display/intel_dpt.h b/drivers/gpu/drm/i915/display/intel_dpt.h
index db521401b828..11bd495693b2 100644
--- a/drivers/gpu/drm/i915/display/intel_dpt.h
+++ b/drivers/gpu/drm/i915/display/intel_dpt.h
@@ -1,26 +1,16 @@
/* SPDX-License-Identifier: MIT */
/*
- * Copyright © 2021 Intel Corporation
+ * Copyright © 2023 Intel Corporation
*/
-#ifndef __INTEL_DPT_H__
-#define __INTEL_DPT_H__
+#ifndef __INTEL_DPT_COMMON_H__
+#define __INTEL_DPT_COMMON_H__
-#include <linux/types.h>
-
-struct i915_address_space;
-struct i915_vma;
+struct intel_crtc;
struct intel_display;
-struct intel_framebuffer;
-void intel_dpt_destroy(struct i915_address_space *vm);
-struct i915_vma *intel_dpt_pin_to_ggtt(struct i915_address_space *vm,
- unsigned int alignment);
-void intel_dpt_unpin_from_ggtt(struct i915_address_space *vm);
+void intel_dpt_configure(struct intel_crtc *crtc);
void intel_dpt_suspend(struct intel_display *display);
void intel_dpt_resume(struct intel_display *display);
-struct i915_address_space *
-intel_dpt_create(struct intel_framebuffer *fb);
-u64 intel_dpt_offset(struct i915_vma *dpt_vma);
-#endif /* __INTEL_DPT_H__ */
+#endif /* __INTEL_DPT_COMMON_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_dpt_common.c b/drivers/gpu/drm/i915/display/intel_dpt_common.c
deleted file mode 100644
index 5eb88d51dba1..000000000000
--- a/drivers/gpu/drm/i915/display/intel_dpt_common.c
+++ /dev/null
@@ -1,35 +0,0 @@
-// SPDX-License-Identifier: MIT
-/*
- * Copyright © 2023 Intel Corporation
- */
-
-#include "intel_de.h"
-#include "intel_display_regs.h"
-#include "intel_display_types.h"
-#include "intel_dpt_common.h"
-#include "skl_universal_plane_regs.h"
-
-void intel_dpt_configure(struct intel_crtc *crtc)
-{
- struct intel_display *display = to_intel_display(crtc);
-
- if (DISPLAY_VER(display) == 14) {
- enum pipe pipe = crtc->pipe;
- enum plane_id plane_id;
-
- for_each_plane_id_on_crtc(crtc, plane_id) {
- if (plane_id == PLANE_CURSOR)
- continue;
-
- intel_de_rmw(display, PLANE_CHICKEN(pipe, plane_id),
- PLANE_CHICKEN_DISABLE_DPT,
- display->params.enable_dpt ? 0 :
- PLANE_CHICKEN_DISABLE_DPT);
- }
- } else if (DISPLAY_VER(display) == 13) {
- intel_de_rmw(display, CHICKEN_MISC_2,
- CHICKEN_MISC_DISABLE_DPT,
- display->params.enable_dpt ? 0 :
- CHICKEN_MISC_DISABLE_DPT);
- }
-}
diff --git a/drivers/gpu/drm/i915/display/intel_dpt_common.h b/drivers/gpu/drm/i915/display/intel_dpt_common.h
deleted file mode 100644
index 6d7de405126a..000000000000
--- a/drivers/gpu/drm/i915/display/intel_dpt_common.h
+++ /dev/null
@@ -1,13 +0,0 @@
-/* SPDX-License-Identifier: MIT */
-/*
- * Copyright © 2023 Intel Corporation
- */
-
-#ifndef __INTEL_DPT_COMMON_H__
-#define __INTEL_DPT_COMMON_H__
-
-struct intel_crtc;
-
-void intel_dpt_configure(struct intel_crtc *crtc);
-
-#endif /* __INTEL_DPT_COMMON_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_dram.c b/drivers/gpu/drm/i915/display/intel_dram.c
index 170de304fe96..bd281d4b4c05 100644
--- a/drivers/gpu/drm/i915/display/intel_dram.c
+++ b/drivers/gpu/drm/i915/display/intel_dram.c
@@ -7,13 +7,14 @@
#include <drm/drm_managed.h>
#include <drm/drm_print.h>
+#include <drm/intel/intel_pcode_regs.h>
-#include "i915_reg.h"
#include "intel_display_core.h"
#include "intel_display_utils.h"
+#include "intel_display_regs.h"
#include "intel_dram.h"
#include "intel_mchbar_regs.h"
-#include "intel_pcode.h"
+#include "intel_parent.h"
#include "intel_uncore.h"
#include "vlv_iosf_sb.h"
@@ -692,8 +693,8 @@ static int icl_pcode_read_mem_global_info(struct intel_display *display,
u32 val = 0;
int ret;
- ret = intel_pcode_read(display->drm, ICL_PCODE_MEM_SUBSYSYSTEM_INFO |
- ICL_PCODE_MEM_SS_READ_GLOBAL_INFO, &val, NULL);
+ ret = intel_parent_pcode_read(display, ICL_PCODE_MEM_SUBSYSYSTEM_INFO |
+ ICL_PCODE_MEM_SS_READ_GLOBAL_INFO, &val, NULL);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/i915/display/intel_dsb.c b/drivers/gpu/drm/i915/display/intel_dsb.c
index b5d774706fec..c8d3968f659f 100644
--- a/drivers/gpu/drm/i915/display/intel_dsb.c
+++ b/drivers/gpu/drm/i915/display/intel_dsb.c
@@ -8,6 +8,7 @@
#include <drm/drm_print.h>
#include <drm/drm_vblank.h>
+#include <drm/intel/display_parent_interface.h>
#include "intel_crtc.h"
#include "intel_de.h"
@@ -15,8 +16,8 @@
#include "intel_display_rpm.h"
#include "intel_display_types.h"
#include "intel_dsb.h"
-#include "intel_dsb_buffer.h"
#include "intel_dsb_regs.h"
+#include "intel_psr.h"
#include "intel_vblank.h"
#include "intel_vrr.h"
#include "skl_watermark.h"
@@ -75,6 +76,57 @@ struct intel_dsb {
* writes). There are no registers reads possible with DSB HW engine.
*/
+/*
+ * DSB buffer parent interface calls are here instead of intel_parent.[ch]
+ * because they're not used outside of intel_dsb.c.
+ */
+static u32 dsb_buffer_ggtt_offset(struct intel_dsb *dsb)
+{
+ struct intel_display *display = to_intel_display(dsb->crtc);
+
+ return display->parent->dsb->ggtt_offset(dsb->dsb_buf);
+}
+
+static void dsb_buffer_write(struct intel_dsb *dsb, u32 idx, u32 val)
+{
+ struct intel_display *display = to_intel_display(dsb->crtc);
+
+ display->parent->dsb->write(dsb->dsb_buf, idx, val);
+}
+
+static u32 dsb_buffer_read(struct intel_dsb *dsb, u32 idx)
+{
+ struct intel_display *display = to_intel_display(dsb->crtc);
+
+ return display->parent->dsb->read(dsb->dsb_buf, idx);
+}
+
+static void dsb_buffer_fill(struct intel_dsb *dsb, u32 idx, u32 val, size_t size)
+{
+ struct intel_display *display = to_intel_display(dsb->crtc);
+
+ display->parent->dsb->fill(dsb->dsb_buf, idx, val, size);
+}
+
+static struct intel_dsb_buffer *dsb_buffer_create(struct intel_display *display, size_t size)
+{
+ return display->parent->dsb->create(display->drm, size);
+}
+
+static void dsb_buffer_cleanup(struct intel_dsb *dsb)
+{
+ struct intel_display *display = to_intel_display(dsb->crtc);
+
+ display->parent->dsb->cleanup(dsb->dsb_buf);
+}
+
+static void dsb_buffer_flush_map(struct intel_dsb *dsb)
+{
+ struct intel_display *display = to_intel_display(dsb->crtc);
+
+ display->parent->dsb->flush_map(dsb->dsb_buf);
+}
+
/* DSB opcodes. */
#define DSB_OPCODE_SHIFT 24
#define DSB_OPCODE_NOOP 0x0
@@ -166,18 +218,24 @@ static int dsb_scanline_to_hw(struct intel_atomic_state *state,
* definitely do not want to skip vblank wait. We also have concern what comes
* to skipping vblank evasion. I.e. arming registers are latched before we have
* managed writing them. Due to these reasons we are not setting
- * DSB_SKIP_WAITS_EN.
+ * DSB_SKIP_WAITS_EN except when using TRANS_PUSH mechanism to trigger
+ * "frame change" event.
*/
static u32 dsb_chicken(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
+ const struct intel_crtc_state *new_crtc_state =
+ intel_atomic_get_new_crtc_state(state, crtc);
+ u32 chicken = intel_psr_use_trans_push(new_crtc_state) ?
+ DSB_SKIP_WAITS_EN : 0;
+
if (pre_commit_is_vrr_active(state, crtc))
- return DSB_CTRL_WAIT_SAFE_WINDOW |
+ chicken |= DSB_CTRL_WAIT_SAFE_WINDOW |
DSB_CTRL_NO_WAIT_VBLANK |
DSB_INST_WAIT_SAFE_WINDOW |
DSB_INST_NO_WAIT_VBLANK;
- else
- return 0;
+
+ return chicken;
}
static bool assert_dsb_has_room(struct intel_dsb *dsb)
@@ -211,10 +269,10 @@ static void intel_dsb_dump(struct intel_dsb *dsb)
for (i = 0; i < ALIGN(dsb->free_pos, 64 / 4); i += 4)
drm_dbg_kms(display->drm,
" 0x%08x: 0x%08x 0x%08x 0x%08x 0x%08x\n", i * 4,
- intel_dsb_buffer_read(dsb->dsb_buf, i),
- intel_dsb_buffer_read(dsb->dsb_buf, i + 1),
- intel_dsb_buffer_read(dsb->dsb_buf, i + 2),
- intel_dsb_buffer_read(dsb->dsb_buf, i + 3));
+ dsb_buffer_read(dsb, i),
+ dsb_buffer_read(dsb, i + 1),
+ dsb_buffer_read(dsb, i + 2),
+ dsb_buffer_read(dsb, i + 3));
drm_dbg_kms(display->drm, "}\n");
}
@@ -231,12 +289,12 @@ unsigned int intel_dsb_size(struct intel_dsb *dsb)
unsigned int intel_dsb_head(struct intel_dsb *dsb)
{
- return intel_dsb_buffer_ggtt_offset(dsb->dsb_buf);
+ return dsb_buffer_ggtt_offset(dsb);
}
static unsigned int intel_dsb_tail(struct intel_dsb *dsb)
{
- return intel_dsb_buffer_ggtt_offset(dsb->dsb_buf) + intel_dsb_size(dsb);
+ return dsb_buffer_ggtt_offset(dsb) + intel_dsb_size(dsb);
}
static void intel_dsb_ins_align(struct intel_dsb *dsb)
@@ -263,8 +321,8 @@ static void intel_dsb_emit(struct intel_dsb *dsb, u32 ldw, u32 udw)
dsb->ins[0] = ldw;
dsb->ins[1] = udw;
- intel_dsb_buffer_write(dsb->dsb_buf, dsb->free_pos++, dsb->ins[0]);
- intel_dsb_buffer_write(dsb->dsb_buf, dsb->free_pos++, dsb->ins[1]);
+ dsb_buffer_write(dsb, dsb->free_pos++, dsb->ins[0]);
+ dsb_buffer_write(dsb, dsb->free_pos++, dsb->ins[1]);
}
static bool intel_dsb_prev_ins_is_write(struct intel_dsb *dsb,
@@ -335,13 +393,12 @@ void intel_dsb_reg_write_indexed(struct intel_dsb *dsb,
/* Update the count */
dsb->ins[0]++;
- intel_dsb_buffer_write(dsb->dsb_buf, dsb->ins_start_offset + 0,
- dsb->ins[0]);
+ dsb_buffer_write(dsb, dsb->ins_start_offset + 0, dsb->ins[0]);
- intel_dsb_buffer_write(dsb->dsb_buf, dsb->free_pos++, val);
+ dsb_buffer_write(dsb, dsb->free_pos++, val);
/* if number of data words is odd, then the last dword should be 0.*/
if (dsb->free_pos & 0x1)
- intel_dsb_buffer_write(dsb->dsb_buf, dsb->free_pos, 0);
+ dsb_buffer_write(dsb, dsb->free_pos, 0);
}
void intel_dsb_reg_write(struct intel_dsb *dsb,
@@ -521,8 +578,7 @@ static void intel_dsb_align_tail(struct intel_dsb *dsb)
aligned_tail = ALIGN(tail, CACHELINE_BYTES);
if (aligned_tail > tail)
- intel_dsb_buffer_memset(dsb->dsb_buf, dsb->free_pos, 0,
- aligned_tail - tail);
+ dsb_buffer_fill(dsb, dsb->free_pos, 0, aligned_tail - tail);
dsb->free_pos = aligned_tail / 4;
}
@@ -541,8 +597,7 @@ static void intel_dsb_gosub_align(struct intel_dsb *dsb)
* "Ensure GOSUB is not placed in cacheline QW slot 6 or 7 (numbered 0-7)"
*/
if (aligned_tail - tail <= 2 * 8)
- intel_dsb_buffer_memset(dsb->dsb_buf, dsb->free_pos, 0,
- aligned_tail - tail);
+ dsb_buffer_fill(dsb, dsb->free_pos, 0, aligned_tail - tail);
dsb->free_pos = aligned_tail / 4;
}
@@ -606,14 +661,14 @@ void intel_dsb_gosub_finish(struct intel_dsb *dsb)
*/
intel_dsb_noop(dsb, 8);
- intel_dsb_buffer_flush_map(dsb->dsb_buf);
+ dsb_buffer_flush_map(dsb);
}
void intel_dsb_finish(struct intel_dsb *dsb)
{
intel_dsb_align_tail(dsb);
- intel_dsb_buffer_flush_map(dsb->dsb_buf);
+ dsb_buffer_flush_map(dsb);
}
static u32 dsb_error_int_status(struct intel_display *display)
@@ -917,7 +972,7 @@ void intel_dsb_wait(struct intel_dsb *dsb)
!is_busy,
100, 1000, false);
if (ret) {
- u32 offset = intel_dsb_buffer_ggtt_offset(dsb->dsb_buf);
+ u32 offset = dsb_buffer_ggtt_offset(dsb);
intel_de_write_fw(display, DSB_CTRL(pipe, dsb->id),
DSB_ENABLE | DSB_HALT);
@@ -983,7 +1038,7 @@ struct intel_dsb *intel_dsb_prepare(struct intel_atomic_state *state,
/* ~1 qword per instruction, full cachelines */
size = ALIGN(max_cmds * 8, CACHELINE_BYTES);
- dsb_buf = intel_dsb_buffer_create(display->drm, size);
+ dsb_buf = dsb_buffer_create(display, size);
if (IS_ERR(dsb_buf))
goto out_put_rpm;
@@ -1021,7 +1076,7 @@ out:
*/
void intel_dsb_cleanup(struct intel_dsb *dsb)
{
- intel_dsb_buffer_cleanup(dsb->dsb_buf);
+ dsb_buffer_cleanup(dsb);
kfree(dsb);
}
diff --git a/drivers/gpu/drm/i915/display/intel_dsb_buffer.h b/drivers/gpu/drm/i915/display/intel_dsb_buffer.h
deleted file mode 100644
index f4577d1f25cd..000000000000
--- a/drivers/gpu/drm/i915/display/intel_dsb_buffer.h
+++ /dev/null
@@ -1,22 +0,0 @@
-/* SPDX-License-Identifier: MIT */
-/*
- * Copyright © 2023 Intel Corporation
- */
-
-#ifndef _INTEL_DSB_BUFFER_H
-#define _INTEL_DSB_BUFFER_H
-
-#include <linux/types.h>
-
-struct drm_device;
-struct intel_dsb_buffer;
-
-u32 intel_dsb_buffer_ggtt_offset(struct intel_dsb_buffer *dsb_buf);
-void intel_dsb_buffer_write(struct intel_dsb_buffer *dsb_buf, u32 idx, u32 val);
-u32 intel_dsb_buffer_read(struct intel_dsb_buffer *dsb_buf, u32 idx);
-void intel_dsb_buffer_memset(struct intel_dsb_buffer *dsb_buf, u32 idx, u32 val, size_t size);
-struct intel_dsb_buffer *intel_dsb_buffer_create(struct drm_device *drm, size_t size);
-void intel_dsb_buffer_cleanup(struct intel_dsb_buffer *dsb_buf);
-void intel_dsb_buffer_flush_map(struct intel_dsb_buffer *dsb_buf);
-
-#endif
diff --git a/drivers/gpu/drm/i915/display/intel_fb.c b/drivers/gpu/drm/i915/display/intel_fb.c
index 38c33f2ca05c..5768619f840f 100644
--- a/drivers/gpu/drm/i915/display/intel_fb.c
+++ b/drivers/gpu/drm/i915/display/intel_fb.c
@@ -16,9 +16,7 @@
#include "intel_display_core.h"
#include "intel_display_types.h"
#include "intel_display_utils.h"
-#include "intel_dpt.h"
#include "intel_fb.h"
-#include "intel_fb_bo.h"
#include "intel_frontbuffer.h"
#include "intel_parent.h"
#include "intel_plane.h"
@@ -2104,16 +2102,17 @@ int intel_plane_compute_gtt(struct intel_plane_state *plane_state)
static void intel_user_framebuffer_destroy(struct drm_framebuffer *fb)
{
+ struct intel_display *display = to_intel_display(fb->dev);
struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
drm_framebuffer_cleanup(fb);
if (intel_fb_uses_dpt(fb))
- intel_dpt_destroy(intel_fb->dpt_vm);
+ intel_parent_dpt_destroy(display, intel_fb->dpt);
- intel_fb_bo_framebuffer_fini(intel_fb_bo(fb));
+ intel_bo_framebuffer_fini(intel_fb_bo(fb));
- intel_frontbuffer_put(intel_fb->frontbuffer);
+ intel_parent_frontbuffer_put(display, intel_fb->frontbuffer);
kfree(intel_fb->panic);
kfree(intel_fb);
@@ -2221,16 +2220,16 @@ int intel_framebuffer_init(struct intel_framebuffer *intel_fb,
return -ENOMEM;
/*
- * intel_frontbuffer_get() must be done before
- * intel_fb_bo_framebuffer_init() to avoid set_tiling vs. addfb race.
+ * intel_parent_frontbuffer_get() must be done before
+ * intel_bo_framebuffer_init() to avoid set_tiling vs. addfb race.
*/
- intel_fb->frontbuffer = intel_frontbuffer_get(obj);
+ intel_fb->frontbuffer = intel_parent_frontbuffer_get(display, obj);
if (!intel_fb->frontbuffer) {
ret = -ENOMEM;
goto err_free_panic;
}
- ret = intel_fb_bo_framebuffer_init(obj, mode_cmd);
+ ret = intel_bo_framebuffer_init(obj, mode_cmd);
if (ret)
goto err_frontbuffer_put;
@@ -2304,16 +2303,21 @@ int intel_framebuffer_init(struct intel_framebuffer *intel_fb,
goto err_bo_framebuffer_fini;
if (intel_fb_uses_dpt(fb)) {
- struct i915_address_space *vm;
+ struct drm_gem_object *obj = intel_fb_bo(&intel_fb->base);
+ struct intel_dpt *dpt;
+ size_t size = 0;
+
+ if (intel_fb_needs_pot_stride_remap(intel_fb))
+ size = intel_remapped_info_size(&intel_fb->remapped_view.gtt.remapped);
- vm = intel_dpt_create(intel_fb);
- if (IS_ERR(vm)) {
+ dpt = intel_parent_dpt_create(display, obj, size);
+ if (IS_ERR(dpt)) {
drm_dbg_kms(display->drm, "failed to create DPT\n");
- ret = PTR_ERR(vm);
+ ret = PTR_ERR(dpt);
goto err_frontbuffer_put;
}
- intel_fb->dpt_vm = vm;
+ intel_fb->dpt = dpt;
}
ret = drm_framebuffer_init(display->drm, fb, &intel_fb_funcs);
@@ -2326,11 +2330,11 @@ int intel_framebuffer_init(struct intel_framebuffer *intel_fb,
err_free_dpt:
if (intel_fb_uses_dpt(fb))
- intel_dpt_destroy(intel_fb->dpt_vm);
+ intel_parent_dpt_destroy(display, intel_fb->dpt);
err_bo_framebuffer_fini:
- intel_fb_bo_framebuffer_fini(obj);
+ intel_bo_framebuffer_fini(obj);
err_frontbuffer_put:
- intel_frontbuffer_put(intel_fb->frontbuffer);
+ intel_parent_frontbuffer_put(display, intel_fb->frontbuffer);
err_free_panic:
kfree(intel_fb->panic);
@@ -2343,11 +2347,12 @@ intel_user_framebuffer_create(struct drm_device *dev,
const struct drm_format_info *info,
const struct drm_mode_fb_cmd2 *user_mode_cmd)
{
+ struct intel_display *display = to_intel_display(dev);
struct drm_framebuffer *fb;
struct drm_gem_object *obj;
struct drm_mode_fb_cmd2 mode_cmd = *user_mode_cmd;
- obj = intel_fb_bo_lookup_valid_bo(dev, filp, &mode_cmd);
+ obj = intel_bo_framebuffer_lookup(display, filp, &mode_cmd);
if (IS_ERR(obj))
return ERR_CAST(obj);
diff --git a/drivers/gpu/drm/i915/display/intel_fb_bo.c b/drivers/gpu/drm/i915/display/intel_fb_bo.c
deleted file mode 100644
index bfecd73d5fa0..000000000000
--- a/drivers/gpu/drm/i915/display/intel_fb_bo.c
+++ /dev/null
@@ -1,101 +0,0 @@
-/* SPDX-License-Identifier: MIT */
-/*
- * Copyright © 2021 Intel Corporation
- */
-
-#include <drm/drm_framebuffer.h>
-#include <drm/drm_print.h>
-
-#include "gem/i915_gem_object.h"
-
-#include "i915_drv.h"
-#include "intel_display_core.h"
-#include "intel_display_types.h"
-#include "intel_fb.h"
-#include "intel_fb_bo.h"
-
-void intel_fb_bo_framebuffer_fini(struct drm_gem_object *obj)
-{
- /* Nothing to do for i915 */
-}
-
-int intel_fb_bo_framebuffer_init(struct drm_gem_object *_obj,
- struct drm_mode_fb_cmd2 *mode_cmd)
-{
- struct drm_i915_gem_object *obj = to_intel_bo(_obj);
- struct intel_display *display = to_intel_display(obj->base.dev);
- unsigned int tiling, stride;
-
- i915_gem_object_lock(obj, NULL);
- tiling = i915_gem_object_get_tiling(obj);
- stride = i915_gem_object_get_stride(obj);
- i915_gem_object_unlock(obj);
-
- if (mode_cmd->flags & DRM_MODE_FB_MODIFIERS) {
- /*
- * If there's a fence, enforce that
- * the fb modifier and tiling mode match.
- */
- if (tiling != I915_TILING_NONE &&
- tiling != intel_fb_modifier_to_tiling(mode_cmd->modifier[0])) {
- drm_dbg_kms(display->drm,
- "tiling_mode doesn't match fb modifier\n");
- return -EINVAL;
- }
- } else {
- if (tiling == I915_TILING_X) {
- mode_cmd->modifier[0] = I915_FORMAT_MOD_X_TILED;
- } else if (tiling == I915_TILING_Y) {
- drm_dbg_kms(display->drm,
- "No Y tiling for legacy addfb\n");
- return -EINVAL;
- }
- }
-
- /*
- * gen2/3 display engine uses the fence if present,
- * so the tiling mode must match the fb modifier exactly.
- */
- if (DISPLAY_VER(display) < 4 &&
- tiling != intel_fb_modifier_to_tiling(mode_cmd->modifier[0])) {
- drm_dbg_kms(display->drm,
- "tiling_mode must match fb modifier exactly on gen2/3\n");
- return -EINVAL;
- }
-
- /*
- * If there's a fence, enforce that
- * the fb pitch and fence stride match.
- */
- if (tiling != I915_TILING_NONE && mode_cmd->pitches[0] != stride) {
- drm_dbg_kms(display->drm,
- "pitch (%d) must match tiling stride (%d)\n",
- mode_cmd->pitches[0], stride);
- return -EINVAL;
- }
-
- return 0;
-}
-
-struct drm_gem_object *
-intel_fb_bo_lookup_valid_bo(struct drm_device *drm,
- struct drm_file *filp,
- const struct drm_mode_fb_cmd2 *mode_cmd)
-{
- struct drm_i915_private *i915 = to_i915(drm);
- struct drm_i915_gem_object *obj;
-
- obj = i915_gem_object_lookup(filp, mode_cmd->handles[0]);
- if (!obj)
- return ERR_PTR(-ENOENT);
-
- /* object is backed with LMEM for discrete */
- if (HAS_LMEM(i915) && !i915_gem_object_can_migrate(obj, INTEL_REGION_LMEM_0)) {
- /* object is "remote", not in local memory */
- i915_gem_object_put(obj);
- drm_dbg_kms(&i915->drm, "framebuffer must reside in local memory\n");
- return ERR_PTR(-EREMOTE);
- }
-
- return intel_bo_to_drm_bo(obj);
-}
diff --git a/drivers/gpu/drm/i915/display/intel_fb_bo.h b/drivers/gpu/drm/i915/display/intel_fb_bo.h
deleted file mode 100644
index d775773c6c03..000000000000
--- a/drivers/gpu/drm/i915/display/intel_fb_bo.h
+++ /dev/null
@@ -1,25 +0,0 @@
-/* SPDX-License-Identifier: MIT */
-/*
- * Copyright © 2021 Intel Corporation
- */
-
-#ifndef __INTEL_FB_BO_H__
-#define __INTEL_FB_BO_H__
-
-struct drm_device;
-struct drm_file;
-struct drm_framebuffer;
-struct drm_gem_object;
-struct drm_mode_fb_cmd2;
-
-void intel_fb_bo_framebuffer_fini(struct drm_gem_object *obj);
-
-int intel_fb_bo_framebuffer_init(struct drm_gem_object *obj,
- struct drm_mode_fb_cmd2 *mode_cmd);
-
-struct drm_gem_object *
-intel_fb_bo_lookup_valid_bo(struct drm_device *drm,
- struct drm_file *filp,
- const struct drm_mode_fb_cmd2 *user_mode_cmd);
-
-#endif
diff --git a/drivers/gpu/drm/i915/display/intel_fb_pin.c b/drivers/gpu/drm/i915/display/intel_fb_pin.c
index 7249b784fbba..738d77a1468a 100644
--- a/drivers/gpu/drm/i915/display/intel_fb_pin.c
+++ b/drivers/gpu/drm/i915/display/intel_fb_pin.c
@@ -17,7 +17,7 @@
#include "intel_display_core.h"
#include "intel_display_rpm.h"
#include "intel_display_types.h"
-#include "intel_dpt.h"
+#include "i915_dpt.h"
#include "intel_fb.h"
#include "intel_fb_pin.h"
#include "intel_plane.h"
@@ -27,13 +27,14 @@ intel_fb_pin_to_dpt(const struct drm_framebuffer *fb,
const struct i915_gtt_view *view,
unsigned int alignment,
unsigned long *out_flags,
- struct i915_address_space *vm)
+ struct intel_dpt *dpt)
{
struct drm_device *dev = fb->dev;
struct intel_display *display = to_intel_display(dev);
struct drm_i915_private *dev_priv = to_i915(dev);
struct drm_gem_object *_obj = intel_fb_bo(fb);
struct drm_i915_gem_object *obj = to_intel_bo(_obj);
+ struct i915_address_space *vm = i915_dpt_to_vm(dpt);
struct i915_gem_ww_ctx ww;
struct i915_vma *vma;
int ret;
@@ -284,7 +285,7 @@ int intel_plane_pin_fb(struct intel_plane_state *plane_state,
} else {
unsigned int alignment = intel_plane_fb_min_alignment(plane_state);
- vma = intel_dpt_pin_to_ggtt(fb->dpt_vm, alignment / 512);
+ vma = i915_dpt_pin_to_ggtt(fb->dpt, alignment / 512);
if (IS_ERR(vma))
return PTR_ERR(vma);
@@ -292,9 +293,9 @@ int intel_plane_pin_fb(struct intel_plane_state *plane_state,
vma = intel_fb_pin_to_dpt(&fb->base, &plane_state->view.gtt,
alignment, &plane_state->flags,
- fb->dpt_vm);
+ fb->dpt);
if (IS_ERR(vma)) {
- intel_dpt_unpin_from_ggtt(fb->dpt_vm);
+ i915_dpt_unpin_from_ggtt(fb->dpt);
plane_state->ggtt_vma = NULL;
return PTR_ERR(vma);
}
@@ -307,7 +308,7 @@ int intel_plane_pin_fb(struct intel_plane_state *plane_state,
* The DPT object contains only one vma, and there is no VT-d
* guard, so the VMA's offset within the DPT is always 0.
*/
- drm_WARN_ON(display->drm, intel_dpt_offset(plane_state->dpt_vma));
+ drm_WARN_ON(display->drm, i915_dpt_offset(plane_state->dpt_vma));
}
/*
@@ -346,7 +347,7 @@ void intel_plane_unpin_fb(struct intel_plane_state *old_plane_state)
vma = fetch_and_zero(&old_plane_state->ggtt_vma);
if (vma)
- intel_dpt_unpin_from_ggtt(fb->dpt_vm);
+ i915_dpt_unpin_from_ggtt(fb->dpt);
}
}
diff --git a/drivers/gpu/drm/i915/display/intel_fbc.c b/drivers/gpu/drm/i915/display/intel_fbc.c
index 91de38379282..ea0ce00c8474 100644
--- a/drivers/gpu/drm/i915/display/intel_fbc.c
+++ b/drivers/gpu/drm/i915/display/intel_fbc.c
@@ -45,7 +45,6 @@
#include <drm/drm_fourcc.h>
#include <drm/drm_print.h>
-#include "i915_vma.h"
#include "i9xx_plane_regs.h"
#include "intel_de.h"
#include "intel_display_device.h"
@@ -184,7 +183,7 @@ static unsigned int skl_fbc_min_cfb_stride(struct intel_display *display,
* Wa_16011863758: icl+
* Avoid some hardware segment address miscalculation.
*/
- if (DISPLAY_VER(display) >= 11)
+ if (intel_display_wa(display, INTEL_DISPLAY_WA_16011863758))
stride += 64;
/*
@@ -950,7 +949,7 @@ static void intel_fbc_program_workarounds(struct intel_fbc *fbc)
}
/* Wa_1409120013:icl,jsl,tgl,dg1 */
- if (IS_DISPLAY_VER(display, 11, 12))
+ if (intel_display_wa(display, INTEL_DISPLAY_WA_1409120013))
intel_de_rmw(display, ILK_DPFC_CHICKEN(fbc->id),
0, DPFC_CHICKEN_COMP_DUMMY_PIXEL);
/*
@@ -958,7 +957,7 @@ static void intel_fbc_program_workarounds(struct intel_fbc *fbc)
* Fixes: Screen flicker with FBC and Package C state enabled
* Workaround: Forced SLB invalidation before start of new frame.
*/
- if (intel_display_wa(display, 22014263786))
+ if (intel_display_wa(display, INTEL_DISPLAY_WA_22014263786))
intel_de_rmw(display, ILK_DPFC_CHICKEN(fbc->id),
0, DPFC_CHICKEN_FORCE_SLB_INVALIDATION);
@@ -980,7 +979,7 @@ static void fbc_sys_cache_update_config(struct intel_display *display, u32 reg,
* Fixes: SoC hardware issue in read caching
* Workaround: disable cache read setting which is enabled by default.
*/
- if (!intel_display_wa(display, 14025769978))
+ if (!intel_display_wa(display, INTEL_DISPLAY_WA_14025769978))
/* Cache read enable is set by default */
reg |= FBC_SYS_CACHE_READ_ENABLE;
@@ -1463,7 +1462,7 @@ static void intel_fbc_update_state(struct intel_atomic_state *state,
!intel_fbc_has_fences(display));
if (plane_state->flags & PLANE_HAS_FENCE)
- fbc_state->fence_id = i915_vma_fence_id(plane_state->ggtt_vma);
+ fbc_state->fence_id = intel_parent_vma_fence_id(display, plane_state->ggtt_vma);
else
fbc_state->fence_id = -1;
@@ -1490,7 +1489,7 @@ static bool intel_fbc_is_fence_ok(const struct intel_plane_state *plane_state)
*/
return DISPLAY_VER(display) >= 9 ||
(plane_state->flags & PLANE_HAS_FENCE &&
- i915_vma_fence_id(plane_state->ggtt_vma) != -1);
+ intel_parent_vma_fence_id(display, plane_state->ggtt_vma) != -1);
}
static bool intel_fbc_is_cfb_ok(const struct intel_plane_state *plane_state)
@@ -1613,7 +1612,7 @@ static int intel_fbc_check_plane(struct intel_atomic_state *state,
return 0;
}
- if (intel_display_wa(display, 16023588340)) {
+ if (intel_display_wa(display, INTEL_DISPLAY_WA_16023588340)) {
plane_state->no_fbc_reason = "Wa_16023588340";
return 0;
}
@@ -1623,7 +1622,7 @@ static int intel_fbc_check_plane(struct intel_atomic_state *state,
* Fixes: Underrun during media decode
* Workaround: Do not enable FBC
*/
- if (intel_display_wa(display, 15018326506)) {
+ if (intel_display_wa(display, INTEL_DISPLAY_WA_15018326506)) {
plane_state->no_fbc_reason = "Wa_15018326506";
return 0;
}
diff --git a/drivers/gpu/drm/i915/display/intel_fbdev.c b/drivers/gpu/drm/i915/display/intel_fbdev.c
index 44f4fcce526e..bdaaf3edba0c 100644
--- a/drivers/gpu/drm/i915/display/intel_fbdev.c
+++ b/drivers/gpu/drm/i915/display/intel_fbdev.c
@@ -47,7 +47,6 @@
#include <drm/drm_managed.h>
#include <drm/drm_print.h>
-#include "i915_vma.h"
#include "intel_bo.h"
#include "intel_display_core.h"
#include "intel_display_rpm.h"
@@ -343,9 +342,7 @@ int intel_fbdev_driver_fbdev_probe(struct drm_fb_helper *helper,
/* Use default scratch pixmap (info->pixmap.flags = FB_PIXMAP_SYSTEM) */
- drm_dbg_kms(display->drm, "allocated %dx%d fb: 0x%08x\n",
- fb->base.width, fb->base.height,
- i915_ggtt_offset(vma));
+ drm_dbg_kms(display->drm, "allocated %dx%d fb\n", fb->base.width, fb->base.height);
ifbdev->fb = fb;
ifbdev->vma = vma;
ifbdev->vma_flags = flags;
diff --git a/drivers/gpu/drm/i915/display/intel_fbdev_fb.c b/drivers/gpu/drm/i915/display/intel_fbdev_fb.c
index c3202ba141c5..6f84eb6355de 100644
--- a/drivers/gpu/drm/i915/display/intel_fbdev_fb.c
+++ b/drivers/gpu/drm/i915/display/intel_fbdev_fb.c
@@ -17,28 +17,40 @@ u32 intel_fbdev_fb_pitch_align(u32 stride)
return ALIGN(stride, 64);
}
+bool intel_fbdev_fb_prefer_stolen(struct drm_device *drm, unsigned int size)
+{
+ struct drm_i915_private *i915 = to_i915(drm);
+
+ /* Skip stolen on MTL as Wa_22018444074 mitigation. */
+ if (IS_METEORLAKE(i915))
+ return false;
+
+ /*
+ * If the FB is too big, just don't use it since fbdev is not very
+ * important and we should probably use that space with FBC or other
+ * features.
+ */
+ return i915->dsm.usable_size >= size * 2;
+}
+
struct drm_gem_object *intel_fbdev_fb_bo_create(struct drm_device *drm, int size)
{
- struct drm_i915_private *dev_priv = to_i915(drm);
+ struct drm_i915_private *i915 = to_i915(drm);
struct drm_i915_gem_object *obj;
obj = ERR_PTR(-ENODEV);
- if (HAS_LMEM(dev_priv)) {
- obj = i915_gem_object_create_lmem(dev_priv, size,
+ if (HAS_LMEM(i915)) {
+ obj = i915_gem_object_create_lmem(i915, size,
I915_BO_ALLOC_CONTIGUOUS |
I915_BO_ALLOC_USER);
} else {
- /*
- * If the FB is too big, just don't use it since fbdev is not very
- * important and we should probably use that space with FBC or other
- * features.
- *
- * Also skip stolen on MTL as Wa_22018444074 mitigation.
- */
- if (!IS_METEORLAKE(dev_priv) && size * 2 < dev_priv->dsm.usable_size)
- obj = i915_gem_object_create_stolen(dev_priv, size);
+ if (intel_fbdev_fb_prefer_stolen(drm, size))
+ obj = i915_gem_object_create_stolen(i915, size);
+ else
+ drm_info(drm, "Allocating fbdev: Stolen memory not preferred.\n");
+
if (IS_ERR(obj))
- obj = i915_gem_object_create_shmem(dev_priv, size);
+ obj = i915_gem_object_create_shmem(i915, size);
}
if (IS_ERR(obj)) {
diff --git a/drivers/gpu/drm/i915/display/intel_fbdev_fb.h b/drivers/gpu/drm/i915/display/intel_fbdev_fb.h
index fd0b3775dc1f..34ed2b9c2b4f 100644
--- a/drivers/gpu/drm/i915/display/intel_fbdev_fb.h
+++ b/drivers/gpu/drm/i915/display/intel_fbdev_fb.h
@@ -19,5 +19,6 @@ struct drm_gem_object *intel_fbdev_fb_bo_create(struct drm_device *drm, int size
void intel_fbdev_fb_bo_destroy(struct drm_gem_object *obj);
int intel_fbdev_fb_fill_info(struct drm_device *drm, struct fb_info *info,
struct drm_gem_object *obj, struct i915_vma *vma);
+bool intel_fbdev_fb_prefer_stolen(struct drm_device *drm, unsigned int size);
#endif
diff --git a/drivers/gpu/drm/i915/display/intel_fdi.c b/drivers/gpu/drm/i915/display/intel_fdi.c
index 5bb0090dd5ed..24ce8a7842c7 100644
--- a/drivers/gpu/drm/i915/display/intel_fdi.c
+++ b/drivers/gpu/drm/i915/display/intel_fdi.c
@@ -8,7 +8,6 @@
#include <drm/drm_fixed.h>
#include <drm/drm_print.h>
-#include "i915_reg.h"
#include "intel_atomic.h"
#include "intel_crtc.h"
#include "intel_ddi.h"
diff --git a/drivers/gpu/drm/i915/display/intel_fifo_underrun.c b/drivers/gpu/drm/i915/display/intel_fifo_underrun.c
index b413b3e871d8..bf047180def9 100644
--- a/drivers/gpu/drm/i915/display/intel_fifo_underrun.c
+++ b/drivers/gpu/drm/i915/display/intel_fifo_underrun.c
@@ -29,7 +29,6 @@
#include <drm/drm_print.h>
-#include "i915_reg.h"
#include "intel_de.h"
#include "intel_display_irq.h"
#include "intel_display_regs.h"
diff --git a/drivers/gpu/drm/i915/display/intel_flipq.c b/drivers/gpu/drm/i915/display/intel_flipq.c
index 1e9550cb66a3..253dc2e96d2d 100644
--- a/drivers/gpu/drm/i915/display/intel_flipq.c
+++ b/drivers/gpu/drm/i915/display/intel_flipq.c
@@ -12,6 +12,7 @@
#include "intel_display_core.h"
#include "intel_display_types.h"
#include "intel_display_utils.h"
+#include "intel_display_wa.h"
#include "intel_dmc.h"
#include "intel_dmc_regs.h"
#include "intel_dsb.h"
@@ -447,19 +448,11 @@ void intel_flipq_add(struct intel_crtc *crtc,
intel_flipq_sw_dmc_wake(crtc);
}
-/* Wa_18034343758 */
-static bool need_dmc_halt_wa(struct intel_display *display)
-{
- return DISPLAY_VER(display) == 20 ||
- (display->platform.pantherlake &&
- IS_DISPLAY_STEP(display, STEP_A0, STEP_B0));
-}
-
void intel_flipq_wait_dmc_halt(struct intel_dsb *dsb, struct intel_crtc *crtc)
{
struct intel_display *display = to_intel_display(crtc);
- if (need_dmc_halt_wa(display))
+ if (intel_display_wa(display, INTEL_DISPLAY_WA_18034343758))
intel_dsb_wait_usec(dsb, 2);
}
@@ -467,6 +460,6 @@ void intel_flipq_unhalt_dmc(struct intel_dsb *dsb, struct intel_crtc *crtc)
{
struct intel_display *display = to_intel_display(crtc);
- if (need_dmc_halt_wa(display))
+ if (intel_display_wa(display, INTEL_DISPLAY_WA_18034343758))
intel_dsb_reg_write(dsb, PIPEDMC_CTL(crtc->pipe), 0);
}
diff --git a/drivers/gpu/drm/i915/display/intel_frontbuffer.c b/drivers/gpu/drm/i915/display/intel_frontbuffer.c
index 03c4978fa5ec..705742e117ca 100644
--- a/drivers/gpu/drm/i915/display/intel_frontbuffer.c
+++ b/drivers/gpu/drm/i915/display/intel_frontbuffer.c
@@ -58,13 +58,13 @@
#include <drm/drm_gem.h>
#include <drm/drm_print.h>
-#include "intel_bo.h"
#include "intel_display_trace.h"
#include "intel_display_types.h"
#include "intel_dp.h"
#include "intel_drrs.h"
#include "intel_fbc.h"
#include "intel_frontbuffer.h"
+#include "intel_parent.h"
#include "intel_psr.h"
#include "intel_tdf.h"
@@ -123,9 +123,9 @@ void intel_frontbuffer_flip(struct intel_display *display,
frontbuffer_flush(display, frontbuffer_bits, ORIGIN_FLIP);
}
-void __intel_fb_invalidate(struct intel_frontbuffer *front,
- enum fb_op_origin origin,
- unsigned int frontbuffer_bits)
+void __intel_frontbuffer_invalidate(struct intel_frontbuffer *front,
+ enum fb_op_origin origin,
+ unsigned int frontbuffer_bits)
{
struct intel_display *display = front->display;
@@ -143,14 +143,14 @@ void __intel_fb_invalidate(struct intel_frontbuffer *front,
intel_fbc_invalidate(display, frontbuffer_bits, origin);
}
-void __intel_fb_flush(struct intel_frontbuffer *front,
- enum fb_op_origin origin,
- unsigned int frontbuffer_bits)
+void __intel_frontbuffer_flush(struct intel_frontbuffer *front,
+ enum fb_op_origin origin,
+ unsigned int frontbuffer_bits)
{
struct intel_display *display = front->display;
if (origin == ORIGIN_DIRTYFB)
- intel_bo_frontbuffer_flush_for_display(front);
+ intel_parent_frontbuffer_flush_for_display(display, front);
if (origin == ORIGIN_CS) {
spin_lock(&display->fb_tracking.lock);
@@ -164,18 +164,13 @@ void __intel_fb_flush(struct intel_frontbuffer *front,
frontbuffer_flush(display, frontbuffer_bits, origin);
}
-static void intel_frontbuffer_ref(struct intel_frontbuffer *front)
-{
- intel_bo_frontbuffer_ref(front);
-}
-
static void intel_frontbuffer_flush_work(struct work_struct *work)
{
struct intel_frontbuffer *front =
container_of(work, struct intel_frontbuffer, flush_work);
intel_frontbuffer_flush(front, ORIGIN_DIRTYFB);
- intel_frontbuffer_put(front);
+ intel_parent_frontbuffer_put(front->display, front);
}
/**
@@ -190,9 +185,9 @@ void intel_frontbuffer_queue_flush(struct intel_frontbuffer *front)
if (!front)
return;
- intel_frontbuffer_ref(front);
+ intel_parent_frontbuffer_ref(front->display, front);
if (!schedule_work(&front->flush_work))
- intel_frontbuffer_put(front);
+ intel_parent_frontbuffer_put(front->display, front);
}
void intel_frontbuffer_init(struct intel_frontbuffer *front, struct drm_device *drm)
@@ -207,16 +202,6 @@ void intel_frontbuffer_fini(struct intel_frontbuffer *front)
drm_WARN_ON(front->display->drm, atomic_read(&front->bits));
}
-struct intel_frontbuffer *intel_frontbuffer_get(struct drm_gem_object *obj)
-{
- return intel_bo_frontbuffer_get(obj);
-}
-
-void intel_frontbuffer_put(struct intel_frontbuffer *front)
-{
- intel_bo_frontbuffer_put(front);
-}
-
/**
* intel_frontbuffer_track - update frontbuffer tracking
* @old: current buffer for the frontbuffer slots
diff --git a/drivers/gpu/drm/i915/display/intel_frontbuffer.h b/drivers/gpu/drm/i915/display/intel_frontbuffer.h
index 22677acb4c06..a89ce352b12b 100644
--- a/drivers/gpu/drm/i915/display/intel_frontbuffer.h
+++ b/drivers/gpu/drm/i915/display/intel_frontbuffer.h
@@ -66,14 +66,9 @@ struct intel_frontbuffer {
void intel_frontbuffer_flip(struct intel_display *display,
unsigned frontbuffer_bits);
-void intel_frontbuffer_put(struct intel_frontbuffer *front);
-
-struct intel_frontbuffer *
-intel_frontbuffer_get(struct drm_gem_object *obj);
-
-void __intel_fb_invalidate(struct intel_frontbuffer *front,
- enum fb_op_origin origin,
- unsigned int frontbuffer_bits);
+void __intel_frontbuffer_invalidate(struct intel_frontbuffer *front,
+ enum fb_op_origin origin,
+ unsigned int frontbuffer_bits);
/**
* intel_frontbuffer_invalidate - invalidate frontbuffer object
@@ -98,13 +93,13 @@ static inline bool intel_frontbuffer_invalidate(struct intel_frontbuffer *front,
if (!frontbuffer_bits)
return false;
- __intel_fb_invalidate(front, origin, frontbuffer_bits);
+ __intel_frontbuffer_invalidate(front, origin, frontbuffer_bits);
return true;
}
-void __intel_fb_flush(struct intel_frontbuffer *front,
- enum fb_op_origin origin,
- unsigned int frontbuffer_bits);
+void __intel_frontbuffer_flush(struct intel_frontbuffer *front,
+ enum fb_op_origin origin,
+ unsigned int frontbuffer_bits);
/**
* intel_frontbuffer_flush - flush frontbuffer object
@@ -126,7 +121,7 @@ static inline void intel_frontbuffer_flush(struct intel_frontbuffer *front,
if (!frontbuffer_bits)
return;
- __intel_fb_flush(front, origin, frontbuffer_bits);
+ __intel_frontbuffer_flush(front, origin, frontbuffer_bits);
}
void intel_frontbuffer_queue_flush(struct intel_frontbuffer *front);
diff --git a/drivers/gpu/drm/i915/display/intel_gmbus.c b/drivers/gpu/drm/i915/display/intel_gmbus.c
index a7bce0c6a17e..df48f27f1cc1 100644
--- a/drivers/gpu/drm/i915/display/intel_gmbus.c
+++ b/drivers/gpu/drm/i915/display/intel_gmbus.c
@@ -35,7 +35,6 @@
#include <drm/drm_print.h>
#include <drm/display/drm_hdcp_helper.h>
-#include "i915_reg.h"
#include "intel_de.h"
#include "intel_display_regs.h"
#include "intel_display_types.h"
@@ -251,7 +250,7 @@ static u32 get_reserved(struct intel_gmbus *bus)
preserve_bits |= GPIO_DATA_PULLUP_DISABLE | GPIO_CLOCK_PULLUP_DISABLE;
/* Wa_16025573575: the masks bits need to be preserved through out */
- if (intel_display_wa(display, 16025573575))
+ if (intel_display_wa(display, INTEL_DISPLAY_WA_16025573575))
preserve_bits |= GPIO_CLOCK_DIR_MASK | GPIO_CLOCK_VAL_MASK |
GPIO_DATA_DIR_MASK | GPIO_DATA_VAL_MASK;
@@ -343,7 +342,7 @@ intel_gpio_pre_xfer(struct i2c_adapter *adapter)
if (display->platform.pineview)
pnv_gmbus_clock_gating(display, false);
- if (intel_display_wa(display, 16025573575))
+ if (intel_display_wa(display, INTEL_DISPLAY_WA_16025573575))
ptl_handle_mask_bits(bus, true);
set_data(bus, 1);
@@ -364,7 +363,7 @@ intel_gpio_post_xfer(struct i2c_adapter *adapter)
if (display->platform.pineview)
pnv_gmbus_clock_gating(display, true);
- if (intel_display_wa(display, 16025573575))
+ if (intel_display_wa(display, INTEL_DISPLAY_WA_16025573575))
ptl_handle_mask_bits(bus, false);
}
diff --git a/drivers/gpu/drm/i915/display/intel_hdcp.c b/drivers/gpu/drm/i915/display/intel_hdcp.c
index 555d35bae887..892eab4b6f92 100644
--- a/drivers/gpu/drm/i915/display/intel_hdcp.c
+++ b/drivers/gpu/drm/i915/display/intel_hdcp.c
@@ -17,8 +17,8 @@
#include <drm/display/drm_hdcp_helper.h>
#include <drm/drm_print.h>
#include <drm/intel/i915_component.h>
+#include <drm/intel/intel_pcode_regs.h>
-#include "i915_reg.h"
#include "intel_connector.h"
#include "intel_de.h"
#include "intel_display_jiffies.h"
@@ -33,7 +33,6 @@
#include "intel_hdcp_regs.h"
#include "intel_hdcp_shim.h"
#include "intel_parent.h"
-#include "intel_pcode.h"
#include "intel_step.h"
#define USE_HDCP_GSC(__display) (DISPLAY_VER(__display) >= 14)
@@ -76,7 +75,6 @@ static int intel_conn_to_vcpi(struct intel_atomic_state *state,
struct drm_dp_mst_topology_mgr *mgr;
struct drm_dp_mst_atomic_payload *payload;
struct drm_dp_mst_topology_state *mst_state;
- int vcpi = 0;
/* For HDMI this is forced to be 0x0. For DP SST also this is 0x0. */
if (!connector->mst.port)
@@ -87,15 +85,9 @@ static int intel_conn_to_vcpi(struct intel_atomic_state *state,
mst_state = to_drm_dp_mst_topology_state(mgr->base.state);
payload = drm_atomic_get_mst_payload_state(mst_state, connector->mst.port);
if (drm_WARN_ON(mgr->dev, !payload))
- goto out;
+ return 0;
- vcpi = payload->vcpi;
- if (drm_WARN_ON(mgr->dev, vcpi < 0)) {
- vcpi = 0;
- goto out;
- }
-out:
- return vcpi;
+ return payload->vcpi;
}
/*
@@ -398,7 +390,7 @@ static int intel_hdcp_load_keys(struct intel_display *display)
* Mailbox interface.
*/
if (DISPLAY_VER(display) == 9 && !display->platform.broxton) {
- ret = intel_pcode_write(display->drm, SKL_PCODE_LOAD_HDCP_KEYS, 1);
+ ret = intel_parent_pcode_write(display, SKL_PCODE_LOAD_HDCP_KEYS, 1);
if (ret) {
drm_err(display->drm,
"Failed to initiate HDCP key load (%d)\n",
@@ -2239,7 +2231,7 @@ static void intel_hdcp_check_work(struct work_struct *work)
if (drm_connector_is_unregistered(&connector->base))
return;
- if (!intel_hdcp2_check_link(connector))
+ if (!hdcp->force_hdcp14 && !intel_hdcp2_check_link(connector))
queue_delayed_work(display->wq.unordered, &hdcp->check_work,
DRM_HDCP2_CHECK_PERIOD_MS);
else if (!intel_hdcp_check_link(connector))
diff --git a/drivers/gpu/drm/i915/display/intel_hdmi.c b/drivers/gpu/drm/i915/display/intel_hdmi.c
index 055e68810d0d..05e898d10a2b 100644
--- a/drivers/gpu/drm/i915/display/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/display/intel_hdmi.c
@@ -56,6 +56,7 @@
#include "intel_display_types.h"
#include "intel_display_utils.h"
#include "intel_dp.h"
+#include "intel_dpll.h"
#include "intel_gmbus.h"
#include "intel_hdcp.h"
#include "intel_hdcp_regs.h"
@@ -70,16 +71,14 @@
bool intel_hdmi_is_frl(u32 clock)
{
- switch (clock) {
- case 300000: /* 3 Gbps */
- case 600000: /* 6 Gbps */
- case 800000: /* 8 Gbps */
- case 1000000: /* 10 Gbps */
- case 1200000: /* 12 Gbps */
- return true;
- default:
- return false;
- }
+ u32 rates[] = { 300000, 600000, 800000, 1000000, 1200000 };
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(rates); i++)
+ if (intel_dpll_clock_matches(clock, rates[i]))
+ return true;
+
+ return false;
}
static void
diff --git a/drivers/gpu/drm/i915/display/intel_hotplug_irq.c b/drivers/gpu/drm/i915/display/intel_hotplug_irq.c
index 82c39e4ffa37..8865cb2ac569 100644
--- a/drivers/gpu/drm/i915/display/intel_hotplug_irq.c
+++ b/drivers/gpu/drm/i915/display/intel_hotplug_irq.c
@@ -5,7 +5,6 @@
#include <drm/drm_print.h>
-#include "i915_reg.h"
#include "intel_de.h"
#include "intel_display_irq.h"
#include "intel_display_regs.h"
diff --git a/drivers/gpu/drm/i915/display/intel_lt_phy.c b/drivers/gpu/drm/i915/display/intel_lt_phy.c
index 6cdae03ee172..eced8493e566 100644
--- a/drivers/gpu/drm/i915/display/intel_lt_phy.c
+++ b/drivers/gpu/drm/i915/display/intel_lt_phy.c
@@ -5,7 +5,6 @@
#include <drm/drm_print.h>
-#include "i915_reg.h"
#include "intel_cx0_phy.h"
#include "intel_cx0_phy_regs.h"
#include "intel_ddi.h"
@@ -14,6 +13,7 @@
#include "intel_display.h"
#include "intel_display_types.h"
#include "intel_display_utils.h"
+#include "intel_dpll.h"
#include "intel_dpll_mgr.h"
#include "intel_hdmi.h"
#include "intel_lt_phy.h"
@@ -60,7 +60,6 @@ struct lt_phy_params {
};
static const struct intel_lt_phy_pll_state xe3plpd_lt_dp_rbr = {
- .clock = 162000,
.config = {
0x83,
0x2d,
@@ -114,7 +113,6 @@ static const struct intel_lt_phy_pll_state xe3plpd_lt_dp_rbr = {
};
static const struct intel_lt_phy_pll_state xe3plpd_lt_dp_hbr1 = {
- .clock = 270000,
.config = {
0x8b,
0x2d,
@@ -168,7 +166,6 @@ static const struct intel_lt_phy_pll_state xe3plpd_lt_dp_hbr1 = {
};
static const struct intel_lt_phy_pll_state xe3plpd_lt_dp_hbr2 = {
- .clock = 540000,
.config = {
0x93,
0x2d,
@@ -222,7 +219,6 @@ static const struct intel_lt_phy_pll_state xe3plpd_lt_dp_hbr2 = {
};
static const struct intel_lt_phy_pll_state xe3plpd_lt_dp_hbr3 = {
- .clock = 810000,
.config = {
0x9b,
0x2d,
@@ -276,7 +272,6 @@ static const struct intel_lt_phy_pll_state xe3plpd_lt_dp_hbr3 = {
};
static const struct intel_lt_phy_pll_state xe3plpd_lt_dp_uhbr10 = {
- .clock = 1000000,
.config = {
0x43,
0x2d,
@@ -330,7 +325,6 @@ static const struct intel_lt_phy_pll_state xe3plpd_lt_dp_uhbr10 = {
};
static const struct intel_lt_phy_pll_state xe3plpd_lt_dp_uhbr13_5 = {
- .clock = 1350000,
.config = {
0xcb,
0x2d,
@@ -384,7 +378,6 @@ static const struct intel_lt_phy_pll_state xe3plpd_lt_dp_uhbr13_5 = {
};
static const struct intel_lt_phy_pll_state xe3plpd_lt_dp_uhbr20 = {
- .clock = 2000000,
.config = {
0x53,
0x2d,
@@ -437,19 +430,35 @@ static const struct intel_lt_phy_pll_state xe3plpd_lt_dp_uhbr20 = {
},
};
-static const struct intel_lt_phy_pll_state * const xe3plpd_lt_dp_tables[] = {
- &xe3plpd_lt_dp_rbr,
- &xe3plpd_lt_dp_hbr1,
- &xe3plpd_lt_dp_hbr2,
- &xe3plpd_lt_dp_hbr3,
- &xe3plpd_lt_dp_uhbr10,
- &xe3plpd_lt_dp_uhbr13_5,
- &xe3plpd_lt_dp_uhbr20,
- NULL,
+struct intel_lt_phy_pll_params {
+ const char *name;
+ bool is_hdmi;
+ int clock_rate;
+ const struct intel_lt_phy_pll_state *state;
+};
+
+#define __LT_PHY_PLL_PARAMS(__is_hdmi, __clock_rate, __state) { \
+ .name = __stringify(__state), \
+ .is_hdmi = __is_hdmi, \
+ .clock_rate = __clock_rate, \
+ .state = &__state, \
+}
+
+#define LT_PHY_PLL_HDMI_PARAMS(__clock_rate, __state) __LT_PHY_PLL_PARAMS(true, __clock_rate, __state)
+#define LT_PHY_PLL_DP_PARAMS(__clock_rate, __state) __LT_PHY_PLL_PARAMS(false, __clock_rate, __state)
+
+static const struct intel_lt_phy_pll_params xe3plpd_lt_dp_tables[] = {
+ LT_PHY_PLL_DP_PARAMS(162000, xe3plpd_lt_dp_rbr),
+ LT_PHY_PLL_DP_PARAMS(270000, xe3plpd_lt_dp_hbr1),
+ LT_PHY_PLL_DP_PARAMS(540000, xe3plpd_lt_dp_hbr2),
+ LT_PHY_PLL_DP_PARAMS(810000, xe3plpd_lt_dp_hbr3),
+ LT_PHY_PLL_DP_PARAMS(1000000, xe3plpd_lt_dp_uhbr10),
+ LT_PHY_PLL_DP_PARAMS(1350000, xe3plpd_lt_dp_uhbr13_5),
+ LT_PHY_PLL_DP_PARAMS(2000000, xe3plpd_lt_dp_uhbr20),
+ {}
};
static const struct intel_lt_phy_pll_state xe3plpd_lt_edp_2_16 = {
- .clock = 216000,
.config = {
0xa3,
0x2d,
@@ -503,7 +512,6 @@ static const struct intel_lt_phy_pll_state xe3plpd_lt_edp_2_16 = {
};
static const struct intel_lt_phy_pll_state xe3plpd_lt_edp_2_43 = {
- .clock = 243000,
.config = {
0xab,
0x2d,
@@ -557,7 +565,6 @@ static const struct intel_lt_phy_pll_state xe3plpd_lt_edp_2_43 = {
};
static const struct intel_lt_phy_pll_state xe3plpd_lt_edp_3_24 = {
- .clock = 324000,
.config = {
0xb3,
0x2d,
@@ -611,7 +618,6 @@ static const struct intel_lt_phy_pll_state xe3plpd_lt_edp_3_24 = {
};
static const struct intel_lt_phy_pll_state xe3plpd_lt_edp_4_32 = {
- .clock = 432000,
.config = {
0xbb,
0x2d,
@@ -665,7 +671,6 @@ static const struct intel_lt_phy_pll_state xe3plpd_lt_edp_4_32 = {
};
static const struct intel_lt_phy_pll_state xe3plpd_lt_edp_6_75 = {
- .clock = 675000,
.config = {
0xdb,
0x2d,
@@ -718,21 +723,20 @@ static const struct intel_lt_phy_pll_state xe3plpd_lt_edp_6_75 = {
},
};
-static const struct intel_lt_phy_pll_state * const xe3plpd_lt_edp_tables[] = {
- &xe3plpd_lt_dp_rbr,
- &xe3plpd_lt_edp_2_16,
- &xe3plpd_lt_edp_2_43,
- &xe3plpd_lt_dp_hbr1,
- &xe3plpd_lt_edp_3_24,
- &xe3plpd_lt_edp_4_32,
- &xe3plpd_lt_dp_hbr2,
- &xe3plpd_lt_edp_6_75,
- &xe3plpd_lt_dp_hbr3,
- NULL,
+static const struct intel_lt_phy_pll_params xe3plpd_lt_edp_tables[] = {
+ LT_PHY_PLL_DP_PARAMS(162000, xe3plpd_lt_dp_rbr),
+ LT_PHY_PLL_DP_PARAMS(216000, xe3plpd_lt_edp_2_16),
+ LT_PHY_PLL_DP_PARAMS(243000, xe3plpd_lt_edp_2_43),
+ LT_PHY_PLL_DP_PARAMS(270000, xe3plpd_lt_dp_hbr1),
+ LT_PHY_PLL_DP_PARAMS(324000, xe3plpd_lt_edp_3_24),
+ LT_PHY_PLL_DP_PARAMS(432000, xe3plpd_lt_edp_4_32),
+ LT_PHY_PLL_DP_PARAMS(540000, xe3plpd_lt_dp_hbr2),
+ LT_PHY_PLL_DP_PARAMS(675000, xe3plpd_lt_edp_6_75),
+ LT_PHY_PLL_DP_PARAMS(810000, xe3plpd_lt_dp_hbr3),
+ {}
};
static const struct intel_lt_phy_pll_state xe3plpd_lt_hdmi_252 = {
- .clock = 25200,
.config = {
0x84,
0x2d,
@@ -785,62 +789,7 @@ static const struct intel_lt_phy_pll_state xe3plpd_lt_hdmi_252 = {
},
};
-static const struct intel_lt_phy_pll_state xe3plpd_lt_hdmi_272 = {
- .clock = 27200,
- .config = {
- 0x84,
- 0x2d,
- 0x0,
- },
- .addr_msb = {
- 0x87,
- 0x87,
- 0x87,
- 0x87,
- 0x88,
- 0x88,
- 0x88,
- 0x88,
- 0x88,
- 0x88,
- 0x88,
- 0x88,
- 0x88,
- },
- .addr_lsb = {
- 0x10,
- 0x0c,
- 0x14,
- 0xe4,
- 0x0c,
- 0x10,
- 0x14,
- 0x18,
- 0x48,
- 0x40,
- 0x4c,
- 0x24,
- 0x44,
- },
- .data = {
- { 0x0, 0x4c, 0x2, 0x0 },
- { 0x0b, 0x15, 0x26, 0xa0 },
- { 0x60, 0x0, 0x0, 0x0 },
- { 0x8, 0x4, 0x96, 0x28 },
- { 0xfa, 0x0c, 0x84, 0x11 },
- { 0x80, 0x0f, 0xd9, 0x53 },
- { 0x86, 0x0, 0x0, 0x0 },
- { 0x1, 0xa0, 0x1, 0x0 },
- { 0x4b, 0x0, 0x0, 0x0 },
- { 0x28, 0x0, 0x0, 0x0 },
- { 0x0, 0x14, 0x2a, 0x14 },
- { 0x0, 0x0, 0x0, 0x0 },
- { 0x0, 0x0, 0x0, 0x0 },
- },
-};
-
static const struct intel_lt_phy_pll_state xe3plpd_lt_hdmi_742p5 = {
- .clock = 74250,
.config = {
0x84,
0x2d,
@@ -894,7 +843,6 @@ static const struct intel_lt_phy_pll_state xe3plpd_lt_hdmi_742p5 = {
};
static const struct intel_lt_phy_pll_state xe3plpd_lt_hdmi_1p485 = {
- .clock = 148500,
.config = {
0x84,
0x2d,
@@ -948,7 +896,6 @@ static const struct intel_lt_phy_pll_state xe3plpd_lt_hdmi_1p485 = {
};
static const struct intel_lt_phy_pll_state xe3plpd_lt_hdmi_5p94 = {
- .clock = 594000,
.config = {
0x84,
0x2d,
@@ -1001,13 +948,12 @@ static const struct intel_lt_phy_pll_state xe3plpd_lt_hdmi_5p94 = {
},
};
-static const struct intel_lt_phy_pll_state * const xe3plpd_lt_hdmi_tables[] = {
- &xe3plpd_lt_hdmi_252,
- &xe3plpd_lt_hdmi_272,
- &xe3plpd_lt_hdmi_742p5,
- &xe3plpd_lt_hdmi_1p485,
- &xe3plpd_lt_hdmi_5p94,
- NULL,
+static const struct intel_lt_phy_pll_params xe3plpd_lt_hdmi_tables[] = {
+ LT_PHY_PLL_HDMI_PARAMS(25200, xe3plpd_lt_hdmi_252),
+ LT_PHY_PLL_HDMI_PARAMS(74250, xe3plpd_lt_hdmi_742p5),
+ LT_PHY_PLL_HDMI_PARAMS(148500, xe3plpd_lt_hdmi_1p485),
+ LT_PHY_PLL_HDMI_PARAMS(594000, xe3plpd_lt_hdmi_5p94),
+ {}
};
static u8 intel_lt_phy_get_owned_lane_mask(struct intel_encoder *encoder)
@@ -1106,7 +1052,7 @@ static int __intel_lt_phy_p2p_write_once(struct intel_encoder *encoder,
* This is the time PHY takes to settle down after programming the PHY.
*/
udelay(150);
- intel_clear_response_ready_flag(encoder, lane);
+ intel_cx0_clear_response_ready_flag(encoder, lane);
intel_lt_phy_clear_status_p2p(encoder, lane);
return 0;
@@ -1346,7 +1292,7 @@ static void intel_lt_phy_transaction_end(struct intel_encoder *encoder, struct r
intel_display_power_put(display, POWER_DOMAIN_DC_OFF, wakeref);
}
-static const struct intel_lt_phy_pll_state * const *
+static const struct intel_lt_phy_pll_params *
intel_lt_phy_pll_tables_get(struct intel_crtc_state *crtc_state,
struct intel_encoder *encoder)
{
@@ -1680,7 +1626,8 @@ intel_lt_phy_calculate_hdmi_state(struct intel_lt_phy_pll_state *lt_state,
}
static int
-intel_lt_phy_calc_hdmi_port_clock(const struct intel_crtc_state *crtc_state)
+intel_lt_phy_calc_hdmi_port_clock(struct intel_display *display,
+ const struct intel_lt_phy_pll_state *lt_state)
{
#define REGVAL(i) ( \
(lt_state->data[i][3]) | \
@@ -1689,9 +1636,6 @@ intel_lt_phy_calc_hdmi_port_clock(const struct intel_crtc_state *crtc_state)
(lt_state->data[i][0] << 24) \
)
- struct intel_display *display = to_intel_display(crtc_state);
- const struct intel_lt_phy_pll_state *lt_state =
- &crtc_state->dpll_hw_state.ltpll;
int clk = 0;
u32 d8, pll_reg_5, pll_reg_3, pll_reg_57, m2div_frac, m2div_int;
u64 temp0, temp1;
@@ -1737,7 +1681,7 @@ intel_lt_phy_calc_hdmi_port_clock(const struct intel_crtc_state *crtc_state)
if (d8 == 0) {
drm_WARN_ON(display->drm,
"Invalid port clock using lowest HDMI portclock\n");
- return xe3plpd_lt_hdmi_252.clock;
+ return xe3plpd_lt_hdmi_tables[0].clock_rate;
}
m2div_int = (pll_reg_3 & REG_GENMASK(14, 5)) >> 5;
temp0 = ((u64)m2div_frac * REF_CLK_KHZ) >> 32;
@@ -1749,13 +1693,10 @@ intel_lt_phy_calc_hdmi_port_clock(const struct intel_crtc_state *crtc_state)
}
int
-intel_lt_phy_calc_port_clock(struct intel_encoder *encoder,
- const struct intel_crtc_state *crtc_state)
+intel_lt_phy_calc_port_clock(struct intel_display *display,
+ const struct intel_lt_phy_pll_state *lt_state)
{
- struct intel_display *display = to_intel_display(encoder);
int clk;
- const struct intel_lt_phy_pll_state *lt_state =
- &crtc_state->dpll_hw_state.ltpll;
u8 mode, rate;
mode = REG_FIELD_GET8(LT_PHY_VDR_MODE_ENCODING_MASK,
@@ -1771,10 +1712,10 @@ intel_lt_phy_calc_port_clock(struct intel_encoder *encoder,
lt_state->config[0]);
clk = intel_lt_phy_get_dp_clock(rate);
} else if (mode == MODE_HDMI_20) {
- clk = intel_lt_phy_calc_hdmi_port_clock(crtc_state);
+ clk = intel_lt_phy_calc_hdmi_port_clock(display, lt_state);
} else {
drm_WARN_ON(display->drm, "Unsupported LT PHY Mode!\n");
- clk = xe3plpd_lt_hdmi_252.clock;
+ clk = 25200;
}
return clk;
@@ -1784,16 +1725,20 @@ int
intel_lt_phy_pll_calc_state(struct intel_crtc_state *crtc_state,
struct intel_encoder *encoder)
{
- const struct intel_lt_phy_pll_state * const *tables;
+ struct intel_display *display = to_intel_display(crtc_state);
+ const struct intel_lt_phy_pll_params *tables;
int i;
tables = intel_lt_phy_pll_tables_get(crtc_state, encoder);
if (!tables)
return -EINVAL;
- for (i = 0; tables[i]; i++) {
- if (crtc_state->port_clock == tables[i]->clock) {
- crtc_state->dpll_hw_state.ltpll = *tables[i];
+ for (i = 0; tables[i].name; i++) {
+ int clock = intel_lt_phy_calc_port_clock(display, tables[i].state);
+
+ drm_WARN_ON(display->drm, !intel_dpll_clock_matches(clock, tables[i].clock_rate));
+ if (intel_dpll_clock_matches(crtc_state->port_clock, clock)) {
+ crtc_state->dpll_hw_state.ltpll = *tables[i].state;
if (intel_crtc_has_dp_encoder(crtc_state)) {
if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_EDP))
crtc_state->dpll_hw_state.ltpll.config[2] = 1;
@@ -2212,6 +2157,9 @@ bool
intel_lt_phy_pll_compare_hw_state(const struct intel_lt_phy_pll_state *a,
const struct intel_lt_phy_pll_state *b)
{
+ if (a->tbt_mode || b->tbt_mode)
+ return true;
+
/*
* With LT PHY values other than VDR0_CONFIG and VDR2_CONFIG are
* unreliable. They cannot always be read back since internally
@@ -2254,8 +2202,6 @@ void intel_lt_phy_pll_readout_hw_state(struct intel_encoder *encoder,
LT_PHY_VDR_X_DATAY(i, j));
}
- pll_state->clock =
- intel_lt_phy_calc_port_clock(encoder, crtc_state);
intel_lt_phy_transaction_end(encoder, wakeref);
}
@@ -2319,3 +2265,66 @@ void intel_xe3plpd_pll_disable(struct intel_encoder *encoder)
intel_lt_phy_pll_disable(encoder);
}
+
+static void intel_lt_phy_pll_verify_clock(struct intel_display *display,
+ int precomputed_clock,
+ const char *pll_state_name,
+ const struct intel_lt_phy_pll_state *pll_state,
+ bool is_precomputed_state)
+{
+ struct drm_printer p;
+ int clock;
+
+ clock = intel_lt_phy_calc_port_clock(display, pll_state);
+
+ if (intel_dpll_clock_matches(clock, precomputed_clock))
+ return;
+
+ drm_warn(display->drm,
+ "PLL state %s (%s): clock difference too high: computed %d, pre-computed %d\n",
+ pll_state_name,
+ is_precomputed_state ? "precomputed" : "computed",
+ clock, precomputed_clock);
+
+ if (!drm_debug_enabled(DRM_UT_KMS))
+ return;
+
+ p = drm_dbg_printer(display->drm, DRM_UT_KMS, NULL);
+
+ drm_printf(&p, "PLL state %s (%s):\n",
+ pll_state_name,
+ is_precomputed_state ? "precomputed" : "computed");
+ intel_lt_phy_dump_hw_state(display, pll_state);
+}
+
+static void intel_lt_phy_pll_verify_params(struct intel_display *display,
+ const struct intel_lt_phy_pll_params *pll_params)
+{
+ struct intel_lt_phy_pll_state pll_state;
+
+ intel_lt_phy_pll_verify_clock(display, pll_params->clock_rate, pll_params->name, pll_params->state, true);
+
+ if (!pll_params->is_hdmi)
+ return;
+
+ if (intel_lt_phy_calculate_hdmi_state(&pll_state, pll_params->clock_rate) != 0)
+ return;
+
+ intel_lt_phy_pll_verify_clock(display, pll_params->clock_rate, pll_params->name, &pll_state, false);
+}
+
+static void intel_lt_phy_pll_verify_tables(struct intel_display *display,
+ const struct intel_lt_phy_pll_params *tables)
+{
+ int i;
+
+ for (i = 0; tables[i].name; i++)
+ intel_lt_phy_pll_verify_params(display, &tables[i]);
+}
+
+void intel_lt_phy_verify_plls(struct intel_display *display)
+{
+ intel_lt_phy_pll_verify_tables(display, xe3plpd_lt_dp_tables);
+ intel_lt_phy_pll_verify_tables(display, xe3plpd_lt_edp_tables);
+ intel_lt_phy_pll_verify_tables(display, xe3plpd_lt_hdmi_tables);
+}
diff --git a/drivers/gpu/drm/i915/display/intel_lt_phy.h b/drivers/gpu/drm/i915/display/intel_lt_phy.h
index bf41858f1bc3..db905668f86d 100644
--- a/drivers/gpu/drm/i915/display/intel_lt_phy.h
+++ b/drivers/gpu/drm/i915/display/intel_lt_phy.h
@@ -21,8 +21,8 @@ void intel_lt_phy_pll_disable(struct intel_encoder *encoder);
int
intel_lt_phy_pll_calc_state(struct intel_crtc_state *crtc_state,
struct intel_encoder *encoder);
-int intel_lt_phy_calc_port_clock(struct intel_encoder *encoder,
- const struct intel_crtc_state *crtc_state);
+int intel_lt_phy_calc_port_clock(struct intel_display *display,
+ const struct intel_lt_phy_pll_state *lt_state);
void intel_lt_phy_set_signal_levels(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state);
void intel_lt_phy_dump_hw_state(struct intel_display *display,
@@ -41,5 +41,6 @@ intel_lt_phy_calculate_hdmi_state(struct intel_lt_phy_pll_state *lt_state,
void intel_xe3plpd_pll_enable(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state);
void intel_xe3plpd_pll_disable(struct intel_encoder *encoder);
+void intel_lt_phy_verify_plls(struct intel_display *display);
#endif /* __INTEL_LT_PHY_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_lt_phy_regs.h b/drivers/gpu/drm/i915/display/intel_lt_phy_regs.h
index 37e46fb9abde..ff6d7829dbb9 100644
--- a/drivers/gpu/drm/i915/display/intel_lt_phy_regs.h
+++ b/drivers/gpu/drm/i915/display/intel_lt_phy_regs.h
@@ -6,12 +6,12 @@
#ifndef __INTEL_LT_PHY_REGS_H__
#define __INTEL_LT_PHY_REGS_H__
-#define XE3PLPD_MSGBUS_TIMEOUT_FAST_US 500
+#define XE3PLPD_MSGBUS_TIMEOUT_FAST_US 500
#define XE3PLPD_MACCLK_TURNON_LATENCY_MS 2
-#define XE3PLPD_MACCLK_TURNOFF_LATENCY_US 1
+#define XE3PLPD_MACCLK_TURNOFF_LATENCY_US 10
#define XE3PLPD_RATE_CALIB_DONE_LATENCY_MS 1
-#define XE3PLPD_RESET_START_LATENCY_US 10
-#define XE3PLPD_PWRDN_TO_RDY_LATENCY_US 4
+#define XE3PLPD_RESET_START_LATENCY_US 10
+#define XE3PLPD_PWRDN_TO_RDY_LATENCY_US 10
#define XE3PLPD_RESET_END_LATENCY_MS 2
/* LT Phy MAC Register */
diff --git a/drivers/gpu/drm/i915/display/intel_modeset_setup.c b/drivers/gpu/drm/i915/display/intel_modeset_setup.c
index d10cbf69a5f8..4086f16a12bf 100644
--- a/drivers/gpu/drm/i915/display/intel_modeset_setup.c
+++ b/drivers/gpu/drm/i915/display/intel_modeset_setup.c
@@ -11,7 +11,6 @@
#include <drm/drm_print.h>
#include <drm/drm_vblank.h>
-#include "i915_reg.h"
#include "i9xx_wm.h"
#include "intel_atomic.h"
#include "intel_bw.h"
@@ -26,6 +25,7 @@
#include "intel_display_power.h"
#include "intel_display_regs.h"
#include "intel_display_types.h"
+#include "intel_display_wa.h"
#include "intel_dmc.h"
#include "intel_fifo_underrun.h"
#include "intel_modeset_setup.h"
@@ -914,7 +914,7 @@ static void intel_early_display_was(struct intel_display *display)
* Display WA #1185 WaDisableDARBFClkGating:glk,icl,ehl,tgl
* Also known as Wa_14010480278.
*/
- if (IS_DISPLAY_VER(display, 10, 12))
+ if (intel_display_wa(display, INTEL_DISPLAY_WA_14010480278))
intel_de_rmw(display, GEN9_CLKGATE_DIS_0, 0, DARBF_GATING_DIS);
/*
diff --git a/drivers/gpu/drm/i915/display/intel_oprom_regs.h b/drivers/gpu/drm/i915/display/intel_oprom_regs.h
new file mode 100644
index 000000000000..e6a6fb51b90c
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_oprom_regs.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: MIT */
+/* Copyright © 2026 Intel Corporation */
+
+#ifndef _INTEL_OPROM_REGS_H_
+#define _INTEL_OPROM_REGS_H_
+
+#define PRIMARY_SPI_TRIGGER _MMIO(0x102040)
+#define PRIMARY_SPI_ADDRESS _MMIO(0x102080)
+#define PRIMARY_SPI_REGIONID _MMIO(0x102084)
+#define SPI_STATIC_REGIONS _MMIO(0x102090)
+#define OPTIONROM_SPI_REGIONID_MASK REG_GENMASK(7, 0)
+#define OROM_OFFSET _MMIO(0x1020c0)
+#define OROM_OFFSET_MASK REG_GENMASK(20, 16)
+
+#endif
diff --git a/drivers/gpu/drm/i915/display/intel_overlay.c b/drivers/gpu/drm/i915/display/intel_overlay.c
index 05c7545c49e5..12a325ceae6f 100644
--- a/drivers/gpu/drm/i915/display/intel_overlay.c
+++ b/drivers/gpu/drm/i915/display/intel_overlay.c
@@ -27,24 +27,16 @@
*/
#include <drm/drm_fourcc.h>
+#include <drm/drm_gem.h>
#include <drm/drm_print.h>
-#include "gem/i915_gem_internal.h"
-#include "gem/i915_gem_object_frontbuffer.h"
-#include "gem/i915_gem_pm.h"
-
-#include "gt/intel_gpu_commands.h"
-#include "gt/intel_ring.h"
-
-#include "i915_drv.h"
-#include "i915_reg.h"
#include "intel_color_regs.h"
#include "intel_de.h"
#include "intel_display_regs.h"
#include "intel_display_types.h"
#include "intel_frontbuffer.h"
#include "intel_overlay.h"
-#include "intel_pci_config.h"
+#include "intel_parent.h"
#include "intel_pfit_regs.h"
/* Limits for overlay size. According to intel doc, the real limits are:
@@ -121,9 +113,6 @@
#define RGB8I_TO_COLORKEY(c) \
((((c) & 0xff) << 16) | (((c) & 0xff) << 8) | (((c) & 0xff) << 0))
-/* overlay flip addr flag */
-#define OFC_UPDATE 0x1
-
/* polyphase filter coefficients */
#define N_HORIZ_Y_TAPS 5
#define N_VERT_Y_TAPS 3
@@ -189,312 +178,16 @@ struct overlay_registers {
struct intel_overlay {
struct intel_display *display;
- struct intel_context *context;
struct intel_crtc *crtc;
- struct i915_vma *vma;
- struct i915_vma *old_vma;
- struct intel_frontbuffer *frontbuffer;
- bool active;
bool pfit_active;
u32 pfit_vscale_ratio; /* shifted-point number, (1<<12) == 1.0 */
u32 color_key:24;
u32 color_key_enabled:1;
u32 brightness, contrast, saturation;
u32 old_xscale, old_yscale;
- /* register access */
- struct drm_i915_gem_object *reg_bo;
struct overlay_registers __iomem *regs;
- u32 flip_addr;
- /* flip handling */
- struct i915_active last_flip;
- void (*flip_complete)(struct intel_overlay *ovl);
};
-static void i830_overlay_clock_gating(struct intel_display *display,
- bool enable)
-{
- struct pci_dev *pdev = to_pci_dev(display->drm->dev);
- u8 val;
-
- /* WA_OVERLAY_CLKGATE:alm */
- if (enable)
- intel_de_write(display, DSPCLK_GATE_D, 0);
- else
- intel_de_write(display, DSPCLK_GATE_D, OVRUNIT_CLOCK_GATE_DISABLE);
-
- /* WA_DISABLE_L2CACHE_CLOCK_GATING:alm */
- pci_bus_read_config_byte(pdev->bus,
- PCI_DEVFN(0, 0), I830_CLOCK_GATE, &val);
- if (enable)
- val &= ~I830_L2_CACHE_CLOCK_GATE_DISABLE;
- else
- val |= I830_L2_CACHE_CLOCK_GATE_DISABLE;
- pci_bus_write_config_byte(pdev->bus,
- PCI_DEVFN(0, 0), I830_CLOCK_GATE, val);
-}
-
-static struct i915_request *
-alloc_request(struct intel_overlay *overlay, void (*fn)(struct intel_overlay *))
-{
- struct i915_request *rq;
- int err;
-
- overlay->flip_complete = fn;
-
- rq = i915_request_create(overlay->context);
- if (IS_ERR(rq))
- return rq;
-
- err = i915_active_add_request(&overlay->last_flip, rq);
- if (err) {
- i915_request_add(rq);
- return ERR_PTR(err);
- }
-
- return rq;
-}
-
-/* overlay needs to be disable in OCMD reg */
-static int intel_overlay_on(struct intel_overlay *overlay)
-{
- struct intel_display *display = overlay->display;
- struct i915_request *rq;
- u32 *cs;
-
- drm_WARN_ON(display->drm, overlay->active);
-
- rq = alloc_request(overlay, NULL);
- if (IS_ERR(rq))
- return PTR_ERR(rq);
-
- cs = intel_ring_begin(rq, 4);
- if (IS_ERR(cs)) {
- i915_request_add(rq);
- return PTR_ERR(cs);
- }
-
- overlay->active = true;
-
- if (display->platform.i830)
- i830_overlay_clock_gating(display, false);
-
- *cs++ = MI_OVERLAY_FLIP | MI_OVERLAY_ON;
- *cs++ = overlay->flip_addr | OFC_UPDATE;
- *cs++ = MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP;
- *cs++ = MI_NOOP;
- intel_ring_advance(rq, cs);
-
- i915_request_add(rq);
-
- return i915_active_wait(&overlay->last_flip);
-}
-
-static void intel_overlay_flip_prepare(struct intel_overlay *overlay,
- struct i915_vma *vma)
-{
- struct intel_display *display = overlay->display;
- enum pipe pipe = overlay->crtc->pipe;
- struct intel_frontbuffer *frontbuffer = NULL;
-
- drm_WARN_ON(display->drm, overlay->old_vma);
-
- if (vma)
- frontbuffer = intel_frontbuffer_get(intel_bo_to_drm_bo(vma->obj));
-
- intel_frontbuffer_track(overlay->frontbuffer, frontbuffer,
- INTEL_FRONTBUFFER_OVERLAY(pipe));
-
- if (overlay->frontbuffer)
- intel_frontbuffer_put(overlay->frontbuffer);
- overlay->frontbuffer = frontbuffer;
-
- overlay->old_vma = overlay->vma;
- if (vma)
- overlay->vma = i915_vma_get(vma);
- else
- overlay->vma = NULL;
-}
-
-/* overlay needs to be enabled in OCMD reg */
-static int intel_overlay_continue(struct intel_overlay *overlay,
- struct i915_vma *vma,
- bool load_polyphase_filter)
-{
- struct intel_display *display = overlay->display;
- struct i915_request *rq;
- u32 flip_addr = overlay->flip_addr;
- u32 tmp, *cs;
-
- drm_WARN_ON(display->drm, !overlay->active);
-
- if (load_polyphase_filter)
- flip_addr |= OFC_UPDATE;
-
- /* check for underruns */
- tmp = intel_de_read(display, DOVSTA);
- if (tmp & (1 << 17))
- drm_dbg(display->drm, "overlay underrun, DOVSTA: %x\n", tmp);
-
- rq = alloc_request(overlay, NULL);
- if (IS_ERR(rq))
- return PTR_ERR(rq);
-
- cs = intel_ring_begin(rq, 2);
- if (IS_ERR(cs)) {
- i915_request_add(rq);
- return PTR_ERR(cs);
- }
-
- *cs++ = MI_OVERLAY_FLIP | MI_OVERLAY_CONTINUE;
- *cs++ = flip_addr;
- intel_ring_advance(rq, cs);
-
- intel_overlay_flip_prepare(overlay, vma);
- i915_request_add(rq);
-
- return 0;
-}
-
-static void intel_overlay_release_old_vma(struct intel_overlay *overlay)
-{
- struct intel_display *display = overlay->display;
- struct i915_vma *vma;
-
- vma = fetch_and_zero(&overlay->old_vma);
- if (drm_WARN_ON(display->drm, !vma))
- return;
-
- intel_frontbuffer_flip(display, INTEL_FRONTBUFFER_OVERLAY(overlay->crtc->pipe));
-
- i915_vma_unpin(vma);
- i915_vma_put(vma);
-}
-
-static void
-intel_overlay_release_old_vid_tail(struct intel_overlay *overlay)
-{
- intel_overlay_release_old_vma(overlay);
-}
-
-static void intel_overlay_off_tail(struct intel_overlay *overlay)
-{
- struct intel_display *display = overlay->display;
-
- intel_overlay_release_old_vma(overlay);
-
- overlay->crtc->overlay = NULL;
- overlay->crtc = NULL;
- overlay->active = false;
-
- if (display->platform.i830)
- i830_overlay_clock_gating(display, true);
-}
-
-static void intel_overlay_last_flip_retire(struct i915_active *active)
-{
- struct intel_overlay *overlay =
- container_of(active, typeof(*overlay), last_flip);
-
- if (overlay->flip_complete)
- overlay->flip_complete(overlay);
-}
-
-/* overlay needs to be disabled in OCMD reg */
-static int intel_overlay_off(struct intel_overlay *overlay)
-{
- struct intel_display *display = overlay->display;
- struct i915_request *rq;
- u32 *cs, flip_addr = overlay->flip_addr;
-
- drm_WARN_ON(display->drm, !overlay->active);
-
- /*
- * According to intel docs the overlay hw may hang (when switching
- * off) without loading the filter coeffs. It is however unclear whether
- * this applies to the disabling of the overlay or to the switching off
- * of the hw. Do it in both cases.
- */
- flip_addr |= OFC_UPDATE;
-
- rq = alloc_request(overlay, intel_overlay_off_tail);
- if (IS_ERR(rq))
- return PTR_ERR(rq);
-
- cs = intel_ring_begin(rq, 6);
- if (IS_ERR(cs)) {
- i915_request_add(rq);
- return PTR_ERR(cs);
- }
-
- /* wait for overlay to go idle */
- *cs++ = MI_OVERLAY_FLIP | MI_OVERLAY_CONTINUE;
- *cs++ = flip_addr;
- *cs++ = MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP;
-
- /* turn overlay off */
- *cs++ = MI_OVERLAY_FLIP | MI_OVERLAY_OFF;
- *cs++ = flip_addr;
- *cs++ = MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP;
-
- intel_ring_advance(rq, cs);
-
- intel_overlay_flip_prepare(overlay, NULL);
- i915_request_add(rq);
-
- return i915_active_wait(&overlay->last_flip);
-}
-
-/*
- * Recover from an interruption due to a signal.
- * We have to be careful not to repeat work forever an make forward progress.
- */
-static int intel_overlay_recover_from_interrupt(struct intel_overlay *overlay)
-{
- return i915_active_wait(&overlay->last_flip);
-}
-
-/*
- * Wait for pending overlay flip and release old frame.
- * Needs to be called before the overlay register are changed
- * via intel_overlay_(un)map_regs.
- */
-static int intel_overlay_release_old_vid(struct intel_overlay *overlay)
-{
- struct intel_display *display = overlay->display;
- struct i915_request *rq;
- u32 *cs;
-
- /*
- * Only wait if there is actually an old frame to release to
- * guarantee forward progress.
- */
- if (!overlay->old_vma)
- return 0;
-
- if (!(intel_de_read(display, GEN2_ISR) & I915_OVERLAY_PLANE_FLIP_PENDING_INTERRUPT)) {
- intel_overlay_release_old_vid_tail(overlay);
- return 0;
- }
-
- rq = alloc_request(overlay, intel_overlay_release_old_vid_tail);
- if (IS_ERR(rq))
- return PTR_ERR(rq);
-
- cs = intel_ring_begin(rq, 2);
- if (IS_ERR(cs)) {
- i915_request_add(rq);
- return PTR_ERR(cs);
- }
-
- *cs++ = MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP;
- *cs++ = MI_NOOP;
- intel_ring_advance(rq, cs);
-
- i915_request_add(rq);
-
- return i915_active_wait(&overlay->last_flip);
-}
-
void intel_overlay_reset(struct intel_display *display)
{
struct intel_overlay *overlay = display->overlay;
@@ -505,7 +198,8 @@ void intel_overlay_reset(struct intel_display *display)
overlay->old_xscale = 0;
overlay->old_yscale = 0;
overlay->crtc = NULL;
- overlay->active = false;
+
+ intel_parent_overlay_reset(display);
}
static int packed_depth_bytes(u32 format)
@@ -767,34 +461,8 @@ static u32 overlay_cmd_reg(struct drm_intel_overlay_put_image *params)
return cmd;
}
-static struct i915_vma *intel_overlay_pin_fb(struct drm_i915_gem_object *new_bo)
-{
- struct i915_gem_ww_ctx ww;
- struct i915_vma *vma;
- int ret;
-
- i915_gem_ww_ctx_init(&ww, true);
-retry:
- ret = i915_gem_object_lock(new_bo, &ww);
- if (!ret) {
- vma = i915_gem_object_pin_to_display_plane(new_bo, &ww, 0, 0,
- NULL, PIN_MAPPABLE);
- ret = PTR_ERR_OR_ZERO(vma);
- }
- if (ret == -EDEADLK) {
- ret = i915_gem_ww_ctx_backoff(&ww);
- if (!ret)
- goto retry;
- }
- i915_gem_ww_ctx_fini(&ww);
- if (ret)
- return ERR_PTR(ret);
-
- return vma;
-}
-
static int intel_overlay_do_put_image(struct intel_overlay *overlay,
- struct drm_i915_gem_object *new_bo,
+ struct drm_gem_object *obj,
struct drm_intel_overlay_put_image *params)
{
struct intel_display *display = overlay->display;
@@ -804,23 +472,24 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay,
bool scale_changed = false;
struct i915_vma *vma;
int ret, tmp_width;
+ u32 tmp, offset;
drm_WARN_ON(display->drm,
!drm_modeset_is_locked(&display->drm->mode_config.connection_mutex));
- ret = intel_overlay_release_old_vid(overlay);
+ ret = intel_parent_overlay_release_old_vid(display);
if (ret != 0)
return ret;
atomic_inc(&display->restore.pending_fb_pin);
- vma = intel_overlay_pin_fb(new_bo);
+ vma = intel_parent_overlay_pin_fb(display, obj, &offset);
if (IS_ERR(vma)) {
ret = PTR_ERR(vma);
goto out_pin_section;
}
- if (!overlay->active) {
+ if (!intel_parent_overlay_is_active(display)) {
const struct intel_crtc_state *crtc_state =
overlay->crtc->config;
u32 oconfig = 0;
@@ -836,7 +505,7 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay,
OCONF_PIPE_A : OCONF_PIPE_B;
iowrite32(oconfig, &regs->OCONFIG);
- ret = intel_overlay_on(overlay);
+ ret = intel_parent_overlay_on(display, INTEL_FRONTBUFFER_OVERLAY(pipe));
if (ret != 0)
goto out_unpin;
}
@@ -853,7 +522,7 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay,
swidth = params->src_width;
swidthsw = calc_swidthsw(display, params->offset_Y, tmp_width);
sheight = params->src_height;
- iowrite32(i915_ggtt_offset(vma) + params->offset_Y, &regs->OBUF_0Y);
+ iowrite32(offset + params->offset_Y, &regs->OBUF_0Y);
ostride = params->stride_Y;
if (params->flags & I915_OVERLAY_YUV_PLANAR) {
@@ -870,9 +539,9 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay,
params->src_width / uv_hscale);
swidthsw |= max(tmp_U, tmp_V) << 16;
- iowrite32(i915_ggtt_offset(vma) + params->offset_U,
+ iowrite32(offset + params->offset_U,
&regs->OBUF_0U);
- iowrite32(i915_ggtt_offset(vma) + params->offset_V,
+ iowrite32(offset + params->offset_V,
&regs->OBUF_0V);
ostride |= params->stride_UV << 16;
@@ -889,14 +558,19 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay,
iowrite32(overlay_cmd_reg(params), &regs->OCMD);
- ret = intel_overlay_continue(overlay, vma, scale_changed);
+ /* check for underruns */
+ tmp = intel_de_read(display, DOVSTA);
+ if (tmp & (1 << 17))
+ drm_dbg(display->drm, "overlay underrun, DOVSTA: %x\n", tmp);
+
+ ret = intel_parent_overlay_continue(display, vma, scale_changed);
if (ret)
goto out_unpin;
return 0;
out_unpin:
- i915_vma_unpin(vma);
+ intel_parent_overlay_unpin_fb(display, vma);
out_pin_section:
atomic_dec(&display->restore.pending_fb_pin);
@@ -911,20 +585,23 @@ int intel_overlay_switch_off(struct intel_overlay *overlay)
drm_WARN_ON(display->drm,
!drm_modeset_is_locked(&display->drm->mode_config.connection_mutex));
- ret = intel_overlay_recover_from_interrupt(overlay);
+ ret = intel_parent_overlay_recover_from_interrupt(display);
if (ret != 0)
return ret;
- if (!overlay->active)
+ if (!intel_parent_overlay_is_active(display))
return 0;
- ret = intel_overlay_release_old_vid(overlay);
+ ret = intel_parent_overlay_release_old_vid(display);
if (ret != 0)
return ret;
iowrite32(0, &overlay->regs->OCMD);
- return intel_overlay_off(overlay);
+ overlay->crtc->overlay = NULL;
+ overlay->crtc = NULL;
+
+ return intel_parent_overlay_off(display);
}
static int check_overlay_possible_on_crtc(struct intel_overlay *overlay,
@@ -1006,7 +683,7 @@ static int check_overlay_scaling(struct drm_intel_overlay_put_image *rec)
static int check_overlay_src(struct intel_display *display,
struct drm_intel_overlay_put_image *rec,
- struct drm_i915_gem_object *new_bo)
+ struct drm_gem_object *obj)
{
int uv_hscale = uv_hsubsampling(rec->flags);
int uv_vscale = uv_vsubsampling(rec->flags);
@@ -1091,7 +768,7 @@ static int check_overlay_src(struct intel_display *display,
return -EINVAL;
tmp = rec->stride_Y*rec->src_height;
- if (rec->offset_Y + tmp > new_bo->base.size)
+ if (rec->offset_Y + tmp > obj->size)
return -EINVAL;
break;
@@ -1102,12 +779,12 @@ static int check_overlay_src(struct intel_display *display,
return -EINVAL;
tmp = rec->stride_Y * rec->src_height;
- if (rec->offset_Y + tmp > new_bo->base.size)
+ if (rec->offset_Y + tmp > obj->size)
return -EINVAL;
tmp = rec->stride_UV * (rec->src_height / uv_vscale);
- if (rec->offset_U + tmp > new_bo->base.size ||
- rec->offset_V + tmp > new_bo->base.size)
+ if (rec->offset_U + tmp > obj->size ||
+ rec->offset_V + tmp > obj->size)
return -EINVAL;
break;
}
@@ -1122,8 +799,8 @@ int intel_overlay_put_image_ioctl(struct drm_device *dev, void *data,
struct drm_intel_overlay_put_image *params = data;
struct intel_overlay *overlay;
struct drm_crtc *drmmode_crtc;
+ struct drm_gem_object *obj;
struct intel_crtc *crtc;
- struct drm_i915_gem_object *new_bo;
int ret;
overlay = display->overlay;
@@ -1145,20 +822,13 @@ int intel_overlay_put_image_ioctl(struct drm_device *dev, void *data,
return -ENOENT;
crtc = to_intel_crtc(drmmode_crtc);
- new_bo = i915_gem_object_lookup(file_priv, params->bo_handle);
- if (!new_bo)
- return -ENOENT;
+ obj = intel_parent_overlay_obj_lookup(display, file_priv, params->bo_handle);
+ if (IS_ERR(obj))
+ return PTR_ERR(obj);
drm_modeset_lock_all(dev);
- if (i915_gem_object_is_tiled(new_bo)) {
- drm_dbg_kms(display->drm,
- "buffer used for overlay image can not be tiled\n");
- ret = -EINVAL;
- goto out_unlock;
- }
-
- ret = intel_overlay_recover_from_interrupt(overlay);
+ ret = intel_parent_overlay_recover_from_interrupt(display);
if (ret != 0)
goto out_unlock;
@@ -1201,7 +871,7 @@ int intel_overlay_put_image_ioctl(struct drm_device *dev, void *data,
goto out_unlock;
}
- ret = check_overlay_src(display, params, new_bo);
+ ret = check_overlay_src(display, params, obj);
if (ret != 0)
goto out_unlock;
@@ -1210,18 +880,18 @@ int intel_overlay_put_image_ioctl(struct drm_device *dev, void *data,
if (ret != 0)
goto out_unlock;
- ret = intel_overlay_do_put_image(overlay, new_bo, params);
+ ret = intel_overlay_do_put_image(overlay, obj, params);
if (ret != 0)
goto out_unlock;
drm_modeset_unlock_all(dev);
- i915_gem_object_put(new_bo);
+ drm_gem_object_put(obj);
return 0;
out_unlock:
drm_modeset_unlock_all(dev);
- i915_gem_object_put(new_bo);
+ drm_gem_object_put(obj);
return ret;
}
@@ -1328,7 +998,7 @@ int intel_overlay_attrs_ioctl(struct drm_device *dev, void *data,
if (DISPLAY_VER(display) == 2)
goto out_unlock;
- if (overlay->active) {
+ if (intel_parent_overlay_is_active(display)) {
ret = -EBUSY;
goto out_unlock;
}
@@ -1354,80 +1024,31 @@ out_unlock:
return ret;
}
-static int get_registers(struct intel_overlay *overlay, bool use_phys)
-{
- struct intel_display *display = overlay->display;
- struct drm_i915_private *i915 = to_i915(display->drm);
- struct drm_i915_gem_object *obj = ERR_PTR(-ENODEV);
- struct i915_vma *vma;
- int err;
-
- if (!display->platform.meteorlake) /* Wa_22018444074 */
- obj = i915_gem_object_create_stolen(i915, PAGE_SIZE);
- if (IS_ERR(obj))
- obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
- if (IS_ERR(obj))
- return PTR_ERR(obj);
-
- vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, PIN_MAPPABLE);
- if (IS_ERR(vma)) {
- err = PTR_ERR(vma);
- goto err_put_bo;
- }
-
- if (use_phys)
- overlay->flip_addr = sg_dma_address(obj->mm.pages->sgl);
- else
- overlay->flip_addr = i915_ggtt_offset(vma);
- overlay->regs = i915_vma_pin_iomap(vma);
- i915_vma_unpin(vma);
-
- if (IS_ERR(overlay->regs)) {
- err = PTR_ERR(overlay->regs);
- goto err_put_bo;
- }
-
- overlay->reg_bo = obj;
- return 0;
-
-err_put_bo:
- i915_gem_object_put(obj);
- return err;
-}
-
void intel_overlay_setup(struct intel_display *display)
{
- struct drm_i915_private *dev_priv = to_i915(display->drm);
struct intel_overlay *overlay;
- struct intel_engine_cs *engine;
- int ret;
+ void __iomem *regs;
if (!HAS_OVERLAY(display))
return;
- engine = to_gt(dev_priv)->engine[RCS0];
- if (!engine || !engine->kernel_context)
- return;
-
overlay = kzalloc_obj(*overlay);
if (!overlay)
return;
+ regs = intel_parent_overlay_setup(display,
+ OVERLAY_NEEDS_PHYSICAL(display));
+ if (IS_ERR(regs))
+ goto out_free;
+
overlay->display = display;
- overlay->context = engine->kernel_context;
+ overlay->regs = regs;
overlay->color_key = 0x0101fe;
overlay->color_key_enabled = true;
overlay->brightness = -19;
overlay->contrast = 75;
overlay->saturation = 146;
- i915_active_init(&overlay->last_flip,
- NULL, intel_overlay_last_flip_retire, 0);
-
- ret = get_registers(overlay, OVERLAY_NEEDS_PHYSICAL(display));
- if (ret)
- goto out_free;
-
memset_io(overlay->regs, 0, sizeof(struct overlay_registers));
update_polyphase_filter(overlay->regs);
update_reg_attrs(overlay, overlay->regs);
@@ -1447,110 +1068,11 @@ bool intel_overlay_available(struct intel_display *display)
void intel_overlay_cleanup(struct intel_display *display)
{
- struct intel_overlay *overlay;
-
- overlay = fetch_and_zero(&display->overlay);
- if (!overlay)
+ if (!display->overlay)
return;
- /*
- * The bo's should be free'd by the generic code already.
- * Furthermore modesetting teardown happens beforehand so the
- * hardware should be off already.
- */
- drm_WARN_ON(display->drm, overlay->active);
-
- i915_gem_object_put(overlay->reg_bo);
- i915_active_fini(&overlay->last_flip);
+ intel_parent_overlay_cleanup(display);
- kfree(overlay);
+ kfree(display->overlay);
+ display->overlay = NULL;
}
-
-#if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)
-
-struct intel_overlay_snapshot {
- struct overlay_registers regs;
- unsigned long base;
- u32 dovsta;
- u32 isr;
-};
-
-struct intel_overlay_snapshot *
-intel_overlay_snapshot_capture(struct intel_display *display)
-{
- struct intel_overlay *overlay = display->overlay;
- struct intel_overlay_snapshot *error;
-
- if (!overlay || !overlay->active)
- return NULL;
-
- error = kmalloc_obj(*error, GFP_ATOMIC);
- if (error == NULL)
- return NULL;
-
- error->dovsta = intel_de_read(display, DOVSTA);
- error->isr = intel_de_read(display, GEN2_ISR);
- error->base = overlay->flip_addr;
-
- memcpy_fromio(&error->regs, overlay->regs, sizeof(error->regs));
-
- return error;
-}
-
-void
-intel_overlay_snapshot_print(const struct intel_overlay_snapshot *error,
- struct drm_printer *p)
-{
- if (!error)
- return;
-
- drm_printf(p, "Overlay, status: 0x%08x, interrupt: 0x%08x\n",
- error->dovsta, error->isr);
- drm_printf(p, " Register file at 0x%08lx:\n", error->base);
-
-#define P(x) drm_printf(p, " " #x ": 0x%08x\n", error->regs.x)
- P(OBUF_0Y);
- P(OBUF_1Y);
- P(OBUF_0U);
- P(OBUF_0V);
- P(OBUF_1U);
- P(OBUF_1V);
- P(OSTRIDE);
- P(YRGB_VPH);
- P(UV_VPH);
- P(HORZ_PH);
- P(INIT_PHS);
- P(DWINPOS);
- P(DWINSZ);
- P(SWIDTH);
- P(SWIDTHSW);
- P(SHEIGHT);
- P(YRGBSCALE);
- P(UVSCALE);
- P(OCLRC0);
- P(OCLRC1);
- P(DCLRKV);
- P(DCLRKM);
- P(SCLRKVH);
- P(SCLRKVL);
- P(SCLRKEN);
- P(OCONFIG);
- P(OCMD);
- P(OSTART_0Y);
- P(OSTART_1Y);
- P(OSTART_0U);
- P(OSTART_0V);
- P(OSTART_1U);
- P(OSTART_1V);
- P(OTILEOFF_0Y);
- P(OTILEOFF_1Y);
- P(OTILEOFF_0U);
- P(OTILEOFF_0V);
- P(OTILEOFF_1U);
- P(OTILEOFF_1V);
- P(FASTHSCALE);
- P(UVSCALEV);
-#undef P
-}
-
-#endif
diff --git a/drivers/gpu/drm/i915/display/intel_overlay.h b/drivers/gpu/drm/i915/display/intel_overlay.h
index d259e4c74b03..a4291d6dd528 100644
--- a/drivers/gpu/drm/i915/display/intel_overlay.h
+++ b/drivers/gpu/drm/i915/display/intel_overlay.h
@@ -13,9 +13,7 @@ struct drm_file;
struct drm_printer;
struct intel_display;
struct intel_overlay;
-struct intel_overlay_snapshot;
-#ifdef I915
void intel_overlay_setup(struct intel_display *display);
bool intel_overlay_available(struct intel_display *display);
void intel_overlay_cleanup(struct intel_display *display);
@@ -25,51 +23,5 @@ int intel_overlay_put_image_ioctl(struct drm_device *dev, void *data,
int intel_overlay_attrs_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
void intel_overlay_reset(struct intel_display *display);
-#else
-static inline void intel_overlay_setup(struct intel_display *display)
-{
-}
-static inline bool intel_overlay_available(struct intel_display *display)
-{
- return false;
-}
-static inline void intel_overlay_cleanup(struct intel_display *display)
-{
-}
-static inline int intel_overlay_switch_off(struct intel_overlay *overlay)
-{
- return 0;
-}
-static inline int intel_overlay_put_image_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- return 0;
-}
-static inline int intel_overlay_attrs_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- return 0;
-}
-static inline void intel_overlay_reset(struct intel_display *display)
-{
-}
-#endif
-
-#if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR) && defined(I915)
-struct intel_overlay_snapshot *
-intel_overlay_snapshot_capture(struct intel_display *display);
-void intel_overlay_snapshot_print(const struct intel_overlay_snapshot *error,
- struct drm_printer *p);
-#else
-static inline struct intel_overlay_snapshot *
-intel_overlay_snapshot_capture(struct intel_display *display)
-{
- return NULL;
-}
-static inline void intel_overlay_snapshot_print(const struct intel_overlay_snapshot *error,
- struct drm_printer *p)
-{
-}
-#endif
#endif /* __INTEL_OVERLAY_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_parent.c b/drivers/gpu/drm/i915/display/intel_parent.c
index 72ae553f79a4..2e3bad2b3e6b 100644
--- a/drivers/gpu/drm/i915/display/intel_parent.c
+++ b/drivers/gpu/drm/i915/display/intel_parent.c
@@ -23,6 +23,55 @@
#include "intel_display_core.h"
#include "intel_parent.h"
+/* dpt */
+struct intel_dpt *intel_parent_dpt_create(struct intel_display *display,
+ struct drm_gem_object *obj, size_t size)
+{
+ if (display->parent->dpt)
+ return display->parent->dpt->create(obj, size);
+
+ return NULL;
+}
+
+void intel_parent_dpt_destroy(struct intel_display *display, struct intel_dpt *dpt)
+{
+ if (display->parent->dpt)
+ display->parent->dpt->destroy(dpt);
+}
+
+void intel_parent_dpt_suspend(struct intel_display *display, struct intel_dpt *dpt)
+{
+ if (display->parent->dpt)
+ display->parent->dpt->suspend(dpt);
+}
+
+void intel_parent_dpt_resume(struct intel_display *display, struct intel_dpt *dpt)
+{
+ if (display->parent->dpt)
+ display->parent->dpt->resume(dpt);
+}
+
+/* frontbuffer */
+struct intel_frontbuffer *intel_parent_frontbuffer_get(struct intel_display *display, struct drm_gem_object *obj)
+{
+ return display->parent->frontbuffer->get(obj);
+}
+
+void intel_parent_frontbuffer_ref(struct intel_display *display, struct intel_frontbuffer *front)
+{
+ display->parent->frontbuffer->ref(front);
+}
+
+void intel_parent_frontbuffer_put(struct intel_display *display, struct intel_frontbuffer *front)
+{
+ display->parent->frontbuffer->put(front);
+}
+
+void intel_parent_frontbuffer_flush_for_display(struct intel_display *display, struct intel_frontbuffer *front)
+{
+ display->parent->frontbuffer->flush_for_display(front);
+}
+
/* hdcp */
ssize_t intel_parent_hdcp_gsc_msg_send(struct intel_display *display,
struct intel_hdcp_gsc_context *gsc_context,
@@ -59,6 +108,82 @@ void intel_parent_irq_synchronize(struct intel_display *display)
display->parent->irq->synchronize(display->drm);
}
+/* overlay */
+bool intel_parent_overlay_is_active(struct intel_display *display)
+{
+ return display->parent->overlay->is_active(display->drm);
+}
+
+int intel_parent_overlay_on(struct intel_display *display,
+ u32 frontbuffer_bits)
+{
+ return display->parent->overlay->overlay_on(display->drm,
+ frontbuffer_bits);
+}
+
+int intel_parent_overlay_continue(struct intel_display *display,
+ struct i915_vma *vma,
+ bool load_polyphase_filter)
+{
+ return display->parent->overlay->overlay_continue(display->drm, vma,
+ load_polyphase_filter);
+}
+
+int intel_parent_overlay_off(struct intel_display *display)
+{
+ return display->parent->overlay->overlay_off(display->drm);
+}
+
+int intel_parent_overlay_recover_from_interrupt(struct intel_display *display)
+{
+ return display->parent->overlay->recover_from_interrupt(display->drm);
+}
+
+int intel_parent_overlay_release_old_vid(struct intel_display *display)
+{
+ return display->parent->overlay->release_old_vid(display->drm);
+}
+
+void intel_parent_overlay_reset(struct intel_display *display)
+{
+ display->parent->overlay->reset(display->drm);
+}
+
+struct i915_vma *intel_parent_overlay_pin_fb(struct intel_display *display,
+ struct drm_gem_object *obj,
+ u32 *offset)
+{
+ return display->parent->overlay->pin_fb(display->drm, obj, offset);
+}
+
+void intel_parent_overlay_unpin_fb(struct intel_display *display,
+ struct i915_vma *vma)
+{
+ return display->parent->overlay->unpin_fb(display->drm, vma);
+}
+
+struct drm_gem_object *intel_parent_overlay_obj_lookup(struct intel_display *display,
+ struct drm_file *filp,
+ u32 handle)
+{
+ return display->parent->overlay->obj_lookup(display->drm,
+ filp, handle);
+}
+
+void __iomem *intel_parent_overlay_setup(struct intel_display *display,
+ bool needs_physical)
+{
+ if (drm_WARN_ON_ONCE(display->drm, !display->parent->overlay))
+ return ERR_PTR(-ENODEV);
+
+ return display->parent->overlay->setup(display->drm, needs_physical);
+}
+
+void intel_parent_overlay_cleanup(struct intel_display *display)
+{
+ display->parent->overlay->cleanup(display->drm);
+}
+
/* panic */
struct intel_panic *intel_parent_panic_alloc(struct intel_display *display)
{
@@ -92,6 +217,28 @@ void intel_parent_pc8_unblock(struct intel_display *display)
display->parent->pc8->unblock(display->drm);
}
+/* pcode */
+int intel_parent_pcode_read(struct intel_display *display, u32 mbox, u32 *val, u32 *val1)
+{
+ return display->parent->pcode->read(display->drm, mbox, val, val1);
+}
+
+int intel_parent_pcode_write_timeout(struct intel_display *display, u32 mbox, u32 val, int timeout_ms)
+{
+ return display->parent->pcode->write(display->drm, mbox, val, timeout_ms);
+}
+
+int intel_parent_pcode_write(struct intel_display *display, u32 mbox, u32 val)
+{
+ return intel_parent_pcode_write_timeout(display, mbox, val, 1);
+}
+
+int intel_parent_pcode_request(struct intel_display *display, u32 mbox, u32 request,
+ u32 reply_mask, u32 reply, int timeout_base_ms)
+{
+ return display->parent->pcode->request(display->drm, mbox, request, reply_mask, reply, timeout_base_ms);
+}
+
/* rps */
bool intel_parent_rps_available(struct intel_display *display)
{
@@ -191,6 +338,15 @@ void intel_parent_stolen_node_free(struct intel_display *display, const struct i
display->parent->stolen->node_free(node);
}
+/* vma */
+int intel_parent_vma_fence_id(struct intel_display *display, const struct i915_vma *vma)
+{
+ if (!display->parent->vma)
+ return -1;
+
+ return display->parent->vma->fence_id(vma);
+}
+
/* generic */
void intel_parent_fence_priority_display(struct intel_display *display, struct dma_fence *fence)
{
diff --git a/drivers/gpu/drm/i915/display/intel_parent.h b/drivers/gpu/drm/i915/display/intel_parent.h
index 47cdc14f9aa2..2013e5ed5aa9 100644
--- a/drivers/gpu/drm/i915/display/intel_parent.h
+++ b/drivers/gpu/drm/i915/display/intel_parent.h
@@ -7,12 +7,30 @@
#include <linux/types.h>
struct dma_fence;
+struct drm_file;
+struct drm_gem_object;
struct drm_scanout_buffer;
+struct i915_vma;
struct intel_display;
+struct intel_dpt;
+struct intel_frontbuffer;
struct intel_hdcp_gsc_context;
struct intel_panic;
struct intel_stolen_node;
+/* dpt */
+struct intel_dpt *intel_parent_dpt_create(struct intel_display *display,
+ struct drm_gem_object *obj, size_t size);
+void intel_parent_dpt_destroy(struct intel_display *display, struct intel_dpt *dpt);
+void intel_parent_dpt_suspend(struct intel_display *display, struct intel_dpt *dpt);
+void intel_parent_dpt_resume(struct intel_display *display, struct intel_dpt *dpt);
+
+/* frontbuffer */
+struct intel_frontbuffer *intel_parent_frontbuffer_get(struct intel_display *display, struct drm_gem_object *obj);
+void intel_parent_frontbuffer_ref(struct intel_display *display, struct intel_frontbuffer *front);
+void intel_parent_frontbuffer_put(struct intel_display *display, struct intel_frontbuffer *front);
+void intel_parent_frontbuffer_flush_for_display(struct intel_display *display, struct intel_frontbuffer *front);
+
/* hdcp */
ssize_t intel_parent_hdcp_gsc_msg_send(struct intel_display *display,
struct intel_hdcp_gsc_context *gsc_context,
@@ -27,6 +45,29 @@ void intel_parent_hdcp_gsc_context_free(struct intel_display *display,
bool intel_parent_irq_enabled(struct intel_display *display);
void intel_parent_irq_synchronize(struct intel_display *display);
+/* overlay */
+bool intel_parent_overlay_is_active(struct intel_display *display);
+int intel_parent_overlay_on(struct intel_display *display,
+ u32 frontbuffer_bits);
+int intel_parent_overlay_continue(struct intel_display *display,
+ struct i915_vma *vma,
+ bool load_polyphase_filter);
+int intel_parent_overlay_off(struct intel_display *display);
+int intel_parent_overlay_recover_from_interrupt(struct intel_display *display);
+int intel_parent_overlay_release_old_vid(struct intel_display *display);
+void intel_parent_overlay_reset(struct intel_display *display);
+struct i915_vma *intel_parent_overlay_pin_fb(struct intel_display *display,
+ struct drm_gem_object *obj,
+ u32 *offset);
+void intel_parent_overlay_unpin_fb(struct intel_display *display,
+ struct i915_vma *vma);
+struct drm_gem_object *intel_parent_overlay_obj_lookup(struct intel_display *display,
+ struct drm_file *filp,
+ u32 handle);
+void __iomem *intel_parent_overlay_setup(struct intel_display *display,
+ bool needs_physical);
+void intel_parent_overlay_cleanup(struct intel_display *display);
+
/* panic */
struct intel_panic *intel_parent_panic_alloc(struct intel_display *display);
int intel_parent_panic_setup(struct intel_display *display, struct intel_panic *panic, struct drm_scanout_buffer *sb);
@@ -36,6 +77,13 @@ void intel_parent_panic_finish(struct intel_display *display, struct intel_panic
void intel_parent_pc8_block(struct intel_display *display);
void intel_parent_pc8_unblock(struct intel_display *display);
+/* pcode */
+int intel_parent_pcode_read(struct intel_display *display, u32 mbox, u32 *val, u32 *val1);
+int intel_parent_pcode_write_timeout(struct intel_display *display, u32 mbox, u32 val, int timeout_ms);
+int intel_parent_pcode_write(struct intel_display *display, u32 mbox, u32 val);
+int intel_parent_pcode_request(struct intel_display *display, u32 mbox, u32 request,
+ u32 reply_mask, u32 reply, int timeout_base_ms);
+
/* rps */
bool intel_parent_rps_available(struct intel_display *display);
void intel_parent_rps_boost_if_not_started(struct intel_display *display, struct dma_fence *fence);
@@ -61,6 +109,9 @@ u64 intel_parent_stolen_node_size(struct intel_display *display, const struct in
struct intel_stolen_node *intel_parent_stolen_node_alloc(struct intel_display *display);
void intel_parent_stolen_node_free(struct intel_display *display, const struct intel_stolen_node *node);
+/* vma */
+int intel_parent_vma_fence_id(struct intel_display *display, const struct i915_vma *vma);
+
/* generic */
bool intel_parent_has_auxccs(struct intel_display *display);
bool intel_parent_has_fenced_regions(struct intel_display *display);
diff --git a/drivers/gpu/drm/i915/display/intel_pch_display.c b/drivers/gpu/drm/i915/display/intel_pch_display.c
index 16619f7be5f8..69c7952a1413 100644
--- a/drivers/gpu/drm/i915/display/intel_pch_display.c
+++ b/drivers/gpu/drm/i915/display/intel_pch_display.c
@@ -6,7 +6,6 @@
#include <drm/drm_print.h>
#include "g4x_dp.h"
-#include "i915_reg.h"
#include "intel_crt.h"
#include "intel_crt_regs.h"
#include "intel_de.h"
diff --git a/drivers/gpu/drm/i915/display/intel_pch_refclk.c b/drivers/gpu/drm/i915/display/intel_pch_refclk.c
index 9a89bb6dcf65..5f88663ef5e8 100644
--- a/drivers/gpu/drm/i915/display/intel_pch_refclk.c
+++ b/drivers/gpu/drm/i915/display/intel_pch_refclk.c
@@ -5,7 +5,6 @@
#include <drm/drm_print.h>
-#include "i915_reg.h"
#include "intel_de.h"
#include "intel_display_regs.h"
#include "intel_display_types.h"
diff --git a/drivers/gpu/drm/i915/display/intel_pmdemand.c b/drivers/gpu/drm/i915/display/intel_pmdemand.c
index f3db55710010..244806a26da3 100644
--- a/drivers/gpu/drm/i915/display/intel_pmdemand.c
+++ b/drivers/gpu/drm/i915/display/intel_pmdemand.c
@@ -15,6 +15,7 @@
#include "intel_display_regs.h"
#include "intel_display_trace.h"
#include "intel_display_utils.h"
+#include "intel_display_wa.h"
#include "intel_pmdemand.h"
#include "intel_step.h"
#include "skl_watermark.h"
@@ -129,9 +130,10 @@ int intel_pmdemand_init(struct intel_display *display)
&pmdemand_state->base,
&intel_pmdemand_funcs);
- if (IS_DISPLAY_VERx100_STEP(display, 1400, STEP_A0, STEP_C0))
- /* Wa_14016740474 */
- intel_de_rmw(display, XELPD_CHICKEN_DCPR_3, 0, DMD_RSP_TIMEOUT_DISABLE);
+ /* Wa_14016740474 */
+ if (intel_display_wa(display, INTEL_DISPLAY_WA_14016740474))
+ intel_de_rmw(display, XELPD_CHICKEN_DCPR_3, 0,
+ DMD_RSP_TIMEOUT_DISABLE);
return 0;
}
diff --git a/drivers/gpu/drm/i915/display/intel_pps.c b/drivers/gpu/drm/i915/display/intel_pps.c
index b217ec7aa758..2d799af73bb7 100644
--- a/drivers/gpu/drm/i915/display/intel_pps.c
+++ b/drivers/gpu/drm/i915/display/intel_pps.c
@@ -9,7 +9,6 @@
#include <drm/drm_print.h>
#include "g4x_dp.h"
-#include "i915_reg.h"
#include "intel_de.h"
#include "intel_display_jiffies.h"
#include "intel_display_power_well.h"
diff --git a/drivers/gpu/drm/i915/display/intel_psr.c b/drivers/gpu/drm/i915/display/intel_psr.c
index 4ce1173a2e91..5041a5a138d1 100644
--- a/drivers/gpu/drm/i915/display/intel_psr.c
+++ b/drivers/gpu/drm/i915/display/intel_psr.c
@@ -29,7 +29,6 @@
#include <drm/drm_print.h>
#include <drm/drm_vblank.h>
-#include "i915_reg.h"
#include "intel_alpm.h"
#include "intel_atomic.h"
#include "intel_crtc.h"
@@ -41,6 +40,7 @@
#include "intel_display_rpm.h"
#include "intel_display_types.h"
#include "intel_display_utils.h"
+#include "intel_display_wa.h"
#include "intel_dmc.h"
#include "intel_dp.h"
#include "intel_dp_aux.h"
@@ -1083,7 +1083,7 @@ static void hsw_activate_psr2(struct intel_dp *intel_dp)
}
/* Wa_22012278275:adl-p */
- if (display->platform.alderlake_p && IS_DISPLAY_STEP(display, STEP_A0, STEP_E0)) {
+ if (intel_display_wa(display, INTEL_DISPLAY_WA_22012278275)) {
static const u8 map[] = {
2, /* 5 lines */
1, /* 6 lines */
@@ -1264,7 +1264,7 @@ tgl_dc3co_exitline_compute_config(struct intel_dp *intel_dp,
return;
/* Wa_16011303918:adl-p */
- if (display->platform.alderlake_p && IS_DISPLAY_STEP(display, STEP_A0, STEP_B0))
+ if (intel_display_wa(display, INTEL_DISPLAY_WA_16011303918))
return;
/*
@@ -1546,8 +1546,7 @@ static bool intel_psr2_config_valid(struct intel_dp *intel_dp,
}
/* Wa_16011181250 */
- if (display->platform.rocketlake || display->platform.alderlake_s ||
- display->platform.dg2) {
+ if (intel_display_wa(display, INTEL_DISPLAY_WA_16011181250)) {
drm_dbg_kms(display->drm,
"PSR2 is defeatured for this platform\n");
return false;
@@ -1829,8 +1828,7 @@ void intel_psr_set_non_psr_pipes(struct intel_dp *intel_dp,
u8 active_pipes = 0;
/* Wa_16025596647 */
- if (DISPLAY_VER(display) != 20 &&
- !IS_DISPLAY_VERx100_STEP(display, 3000, STEP_A0, STEP_B0))
+ if (!intel_display_wa(display, INTEL_DISPLAY_WA_16025596647))
return;
/* Not needed by Panel Replay */
@@ -2130,6 +2128,9 @@ static void intel_psr_enable_source(struct intel_dp *intel_dp,
intel_dmc_block_pkgc(display, intel_dp->psr.pipe, true);
intel_alpm_configure(intel_dp, crtc_state);
+
+ if (HAS_PSR_TRANS_PUSH_FRAME_CHANGE(display))
+ intel_vrr_psr_frame_change_enable(crtc_state);
}
static bool psr_interrupt_error_check(struct intel_dp *intel_dp)
@@ -2521,9 +2522,11 @@ void intel_psr_trigger_frame_change_event(struct intel_dsb *dsb,
intel_pre_commit_crtc_state(state, crtc);
struct intel_display *display = to_intel_display(crtc);
- if (crtc_state->has_psr)
- intel_de_write_dsb(display, dsb,
- CURSURFLIVE(display, crtc->pipe), 0);
+ if (!crtc_state->has_psr || intel_psr_use_trans_push(crtc_state))
+ return;
+
+ intel_de_write_dsb(display, dsb,
+ CURSURFLIVE(display, crtc->pipe), 0);
}
/**
@@ -2619,6 +2622,12 @@ void intel_psr2_program_trans_man_trk_ctl(struct intel_dsb *dsb,
intel_de_write_dsb(display, dsb, PIPE_SRCSZ_ERLY_TPT(crtc->pipe),
crtc_state->pipe_srcsz_early_tpt);
+
+ if (!crtc_state->dsc.compression_enable)
+ return;
+
+ intel_dsc_su_et_parameters_configure(dsb, encoder, crtc_state,
+ drm_rect_height(&crtc_state->psr2_su_area));
}
static void psr2_man_trk_ctl_calc(struct intel_crtc_state *crtc_state,
@@ -2689,11 +2698,12 @@ static void clip_area_update(struct drm_rect *overlap_damage_area,
overlap_damage_area->y2 = damage_area->y2;
}
-static void intel_psr2_sel_fetch_pipe_alignment(struct intel_crtc_state *crtc_state)
+static bool intel_psr2_sel_fetch_pipe_alignment(struct intel_crtc_state *crtc_state)
{
struct intel_display *display = to_intel_display(crtc_state);
const struct drm_dsc_config *vdsc_cfg = &crtc_state->dsc.config;
u16 y_alignment;
+ bool su_area_changed = false;
/* ADLP aligns the SU region to vdsc slice height in case dsc is enabled */
if (crtc_state->dsc.compression_enable &&
@@ -2702,10 +2712,18 @@ static void intel_psr2_sel_fetch_pipe_alignment(struct intel_crtc_state *crtc_st
else
y_alignment = crtc_state->su_y_granularity;
- crtc_state->psr2_su_area.y1 -= crtc_state->psr2_su_area.y1 % y_alignment;
- if (crtc_state->psr2_su_area.y2 % y_alignment)
+ if (crtc_state->psr2_su_area.y1 % y_alignment) {
+ crtc_state->psr2_su_area.y1 -= crtc_state->psr2_su_area.y1 % y_alignment;
+ su_area_changed = true;
+ }
+
+ if (crtc_state->psr2_su_area.y2 % y_alignment) {
crtc_state->psr2_su_area.y2 = ((crtc_state->psr2_su_area.y2 /
y_alignment) + 1) * y_alignment;
+ su_area_changed = true;
+ }
+
+ return su_area_changed;
}
/*
@@ -2839,7 +2857,7 @@ int intel_psr2_sel_fetch_update(struct intel_atomic_state *state,
struct intel_crtc_state *crtc_state = intel_atomic_get_new_crtc_state(state, crtc);
struct intel_plane_state *new_plane_state, *old_plane_state;
struct intel_plane *plane;
- bool full_update = false, cursor_in_su_area = false;
+ bool full_update = false, su_area_changed;
int i, ret;
if (!crtc_state->enable_psr2_sel_fetch)
@@ -2946,15 +2964,32 @@ int intel_psr2_sel_fetch_update(struct intel_atomic_state *state,
if (ret)
return ret;
- /*
- * Adjust su area to cover cursor fully as necessary (early
- * transport). This needs to be done after
- * drm_atomic_add_affected_planes to ensure visible cursor is added into
- * affected planes even when cursor is not updated by itself.
- */
- intel_psr2_sel_fetch_et_alignment(state, crtc, &cursor_in_su_area);
+ do {
+ bool cursor_in_su_area;
- intel_psr2_sel_fetch_pipe_alignment(crtc_state);
+ /*
+ * Adjust su area to cover cursor fully as necessary
+ * (early transport). This needs to be done after
+ * drm_atomic_add_affected_planes to ensure visible
+ * cursor is added into affected planes even when
+ * cursor is not updated by itself.
+ */
+ intel_psr2_sel_fetch_et_alignment(state, crtc, &cursor_in_su_area);
+
+ su_area_changed = intel_psr2_sel_fetch_pipe_alignment(crtc_state);
+
+ /*
+ * If the cursor was outside the SU area before
+ * alignment, the alignment step (which only expands
+ * SU) may pull the cursor partially inside, so we
+ * must run ET alignment again to fully cover it. But
+ * if the cursor was already fully inside before
+ * alignment, expanding the SU area won't change that,
+ * so no further work is needed.
+ */
+ if (cursor_in_su_area)
+ break;
+ } while (su_area_changed);
/*
* Now that we have the pipe damaged area check if it intersect with
@@ -3014,6 +3049,10 @@ int intel_psr2_sel_fetch_update(struct intel_atomic_state *state,
}
skip_sel_fetch_set_loop:
+ if (full_update)
+ clip_area_update(&crtc_state->psr2_su_area, &crtc_state->pipe_src,
+ &crtc_state->pipe_src);
+
psr2_man_trk_ctl_calc(crtc_state, full_update);
crtc_state->pipe_srcsz_early_tpt =
psr2_pipe_srcsz_early_tpt_calc(crtc_state, full_update);
@@ -3562,7 +3601,14 @@ static void _psr_flush_handle(struct intel_dp *intel_dp)
{
struct intel_display *display = to_intel_display(intel_dp);
- if (DISPLAY_VER(display) < 20 && intel_dp->psr.psr2_sel_fetch_enabled) {
+ if (DISPLAY_VER(display) >= 20) {
+ /*
+ * We can use PSR exit on LunarLake onwards. Also
+ * using trans push mechanism to trigger Frame Change
+ * event requires using PSR exit.
+ */
+ intel_psr_exit(intel_dp);
+ } else if (intel_dp->psr.psr2_sel_fetch_enabled) {
/* Selective fetch prior LNL */
if (intel_dp->psr.psr2_sel_fetch_cff_enabled) {
/* can we turn CFF off? */
@@ -3582,16 +3628,11 @@ static void _psr_flush_handle(struct intel_dp *intel_dp)
intel_psr_configure_full_frame_update(intel_dp);
intel_psr_force_update(intel_dp);
- } else if (!intel_dp->psr.psr2_sel_fetch_enabled) {
+ } else {
/*
- * PSR1 on all platforms
- * PSR2 HW tracking
- * Panel Replay Full frame update
* On older platforms, using PSR exit has been seen to cause problems
*/
intel_psr_force_update(intel_dp);
- } else {
- /* Selective update LNL onwards */
- intel_psr_exit(intel_dp);
}
if (!intel_dp->psr.active && !intel_dp->psr.busy_frontbuffer_bits)
@@ -3972,8 +4013,7 @@ static void psr_dc5_dc6_wa_work(struct work_struct *work)
*/
void intel_psr_notify_dc5_dc6(struct intel_display *display)
{
- if (DISPLAY_VER(display) != 20 &&
- !IS_DISPLAY_VERx100_STEP(display, 3000, STEP_A0, STEP_B0))
+ if (!intel_display_wa(display, INTEL_DISPLAY_WA_16025596647))
return;
schedule_work(&display->psr_dc5_dc6_wa_work);
@@ -3988,8 +4028,7 @@ void intel_psr_notify_dc5_dc6(struct intel_display *display)
*/
void intel_psr_dc5_dc6_wa_init(struct intel_display *display)
{
- if (DISPLAY_VER(display) != 20 &&
- !IS_DISPLAY_VERx100_STEP(display, 3000, STEP_A0, STEP_B0))
+ if (!intel_display_wa(display, INTEL_DISPLAY_WA_16025596647))
return;
INIT_WORK(&display->psr_dc5_dc6_wa_work, psr_dc5_dc6_wa_work);
@@ -4010,8 +4049,7 @@ void intel_psr_notify_pipe_change(struct intel_atomic_state *state,
struct intel_display *display = to_intel_display(state);
struct intel_encoder *encoder;
- if (DISPLAY_VER(display) != 20 &&
- !IS_DISPLAY_VERx100_STEP(display, 3000, STEP_A0, STEP_B0))
+ if (!intel_display_wa(display, INTEL_DISPLAY_WA_16025596647))
return;
for_each_intel_encoder_with_psr(display->drm, encoder) {
@@ -4567,3 +4605,10 @@ int intel_psr_min_guardband(struct intel_crtc_state *crtc_state)
return psr_min_guardband;
}
+
+bool intel_psr_use_trans_push(const struct intel_crtc_state *crtc_state)
+{
+ struct intel_display *display = to_intel_display(crtc_state);
+
+ return HAS_PSR_TRANS_PUSH_FRAME_CHANGE(display) && crtc_state->has_psr;
+}
diff --git a/drivers/gpu/drm/i915/display/intel_psr.h b/drivers/gpu/drm/i915/display/intel_psr.h
index b41dc4d44ff2..394b641840b3 100644
--- a/drivers/gpu/drm/i915/display/intel_psr.h
+++ b/drivers/gpu/drm/i915/display/intel_psr.h
@@ -85,5 +85,6 @@ bool intel_psr_needs_alpm_aux_less(struct intel_dp *intel_dp,
void intel_psr_compute_config_late(struct intel_dp *intel_dp,
struct intel_crtc_state *crtc_state);
int intel_psr_min_guardband(struct intel_crtc_state *crtc_state);
+bool intel_psr_use_trans_push(const struct intel_crtc_state *crtc_state);
#endif /* __INTEL_PSR_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_rom.c b/drivers/gpu/drm/i915/display/intel_rom.c
index 05b6ea764ebb..d573059fb0d9 100644
--- a/drivers/gpu/drm/i915/display/intel_rom.c
+++ b/drivers/gpu/drm/i915/display/intel_rom.c
@@ -7,10 +7,9 @@
#include <drm/drm_device.h>
-#include "i915_reg.h"
-
#include "intel_rom.h"
#include "intel_uncore.h"
+#include "intel_oprom_regs.h"
struct intel_rom {
/* for PCI ROM */
diff --git a/drivers/gpu/drm/i915/display/intel_snps_hdmi_pll.c b/drivers/gpu/drm/i915/display/intel_snps_hdmi_pll.c
index a201edceee10..7fe6b4a18213 100644
--- a/drivers/gpu/drm/i915/display/intel_snps_hdmi_pll.c
+++ b/drivers/gpu/drm/i915/display/intel_snps_hdmi_pll.c
@@ -332,8 +332,6 @@ void intel_snps_hdmi_pll_compute_c10pll(struct intel_c10pll_state *pll_state, u6
c10_curve_1, c10_curve_2, prescaler_divider,
&pll_params);
- pll_state->clock = pixel_clock;
-
pll_state->tx = 0x10;
pll_state->cmn = 0x1;
pll_state->pll[0] = REG_FIELD_PREP(C10_PLL0_DIV5CLK_EN, pll_params.mpll_div5_en) |
diff --git a/drivers/gpu/drm/i915/display/intel_tc.c b/drivers/gpu/drm/i915/display/intel_tc.c
index bd12148e42f7..a21dd4e3fe4c 100644
--- a/drivers/gpu/drm/i915/display/intel_tc.c
+++ b/drivers/gpu/drm/i915/display/intel_tc.c
@@ -7,7 +7,6 @@
#include <drm/drm_print.h>
-#include "i915_reg.h"
#include "intel_atomic.h"
#include "intel_cx0_phy_regs.h"
#include "intel_ddi.h"
@@ -1845,7 +1844,7 @@ bool intel_tc_port_link_reset(struct intel_digital_port *dig_port)
if (!intel_tc_port_link_needs_reset(dig_port))
return false;
- queue_delayed_work(system_unbound_wq,
+ queue_delayed_work(system_dfl_wq,
&to_tc_port(dig_port)->link_reset_work,
msecs_to_jiffies(2000));
@@ -1926,7 +1925,7 @@ void intel_tc_port_unlock(struct intel_digital_port *dig_port)
struct intel_tc_port *tc = to_tc_port(dig_port);
if (!tc->link_refcount && tc->mode != TC_PORT_DISCONNECTED)
- queue_delayed_work(system_unbound_wq, &tc->disconnect_phy_work,
+ queue_delayed_work(system_dfl_wq, &tc->disconnect_phy_work,
msecs_to_jiffies(1000));
mutex_unlock(&tc->lock);
diff --git a/drivers/gpu/drm/i915/display/intel_vbt_defs.h b/drivers/gpu/drm/i915/display/intel_vbt_defs.h
index 57fda5824c9c..0dc13d080e8a 100644
--- a/drivers/gpu/drm/i915/display/intel_vbt_defs.h
+++ b/drivers/gpu/drm/i915/display/intel_vbt_defs.h
@@ -1109,6 +1109,7 @@ struct bdb_edp {
u16 edp_dsc_disable; /* 251+ */
u16 t6_delay_support; /* 260+ */
u16 link_idle_time[16]; /* 260+ */
+ u16 pipe_joiner_enable; /* 261+ */
} __packed;
/*
diff --git a/drivers/gpu/drm/i915/display/intel_vdsc.c b/drivers/gpu/drm/i915/display/intel_vdsc.c
index 5493082f30a7..6c09c6d99ffe 100644
--- a/drivers/gpu/drm/i915/display/intel_vdsc.c
+++ b/drivers/gpu/drm/i915/display/intel_vdsc.c
@@ -35,6 +35,58 @@ bool intel_dsc_source_support(const struct intel_crtc_state *crtc_state)
return true;
}
+int intel_dsc_line_slice_count(const struct intel_dsc_slice_config *config)
+{
+ return config->pipes_per_line * config->streams_per_pipe * config->slices_per_stream;
+}
+
+bool intel_dsc_get_slice_config(struct intel_display *display,
+ int pipes_per_line, int slices_per_pipe,
+ struct intel_dsc_slice_config *config)
+{
+ int streams_per_pipe;
+
+ /* TODO: Add support for 8 slices per pipe on TGL+. */
+ switch (slices_per_pipe) {
+ case 3:
+ /*
+ * 3 DSC Slices per pipe need 3 DSC engines, which is supported only
+ * with Ultrajoiner, and only on some platforms.
+ */
+ if (!HAS_DSC_3ENGINES(display) || pipes_per_line != 4)
+ return false;
+
+ streams_per_pipe = 3;
+ break;
+ case 4:
+ /* TODO: Consider using 1 DSC engine stream x 4 slices instead. */
+ case 2:
+ /* TODO: Consider using 1 DSC engine stream x 2 slices instead. */
+ streams_per_pipe = 2;
+ break;
+ case 1:
+ /*
+ * Bigjoiner needs small joiner to be enabled.
+ * So there should be at least 2 dsc slices per pipe,
+ * whenever bigjoiner is enabled.
+ */
+ if (pipes_per_line > 1)
+ return false;
+
+ streams_per_pipe = 1;
+ break;
+ default:
+ MISSING_CASE(slices_per_pipe);
+ return false;
+ }
+
+ config->pipes_per_line = pipes_per_line;
+ config->streams_per_pipe = streams_per_pipe;
+ config->slices_per_stream = slices_per_pipe / streams_per_pipe;
+
+ return true;
+}
+
static bool is_pipe_dsc(struct intel_crtc *crtc, enum transcoder cpu_transcoder)
{
struct intel_display *display = to_intel_display(crtc);
@@ -278,8 +330,9 @@ int intel_dsc_compute_params(struct intel_crtc_state *pipe_config)
int ret;
vdsc_cfg->pic_width = pipe_config->hw.adjusted_mode.crtc_hdisplay;
- vdsc_cfg->slice_width = DIV_ROUND_UP(vdsc_cfg->pic_width,
- pipe_config->dsc.slice_count);
+ vdsc_cfg->slice_width =
+ DIV_ROUND_UP(vdsc_cfg->pic_width,
+ intel_dsc_line_slice_count(&pipe_config->dsc.slice_config));
err = intel_dsc_slice_dimensions_valid(pipe_config, vdsc_cfg);
@@ -416,7 +469,7 @@ intel_dsc_power_domain(struct intel_crtc *crtc, enum transcoder cpu_transcoder)
static int intel_dsc_get_vdsc_per_pipe(const struct intel_crtc_state *crtc_state)
{
- return crtc_state->dsc.num_streams;
+ return crtc_state->dsc.slice_config.streams_per_pipe;
}
int intel_dsc_get_num_vdsc_instances(const struct intel_crtc_state *crtc_state)
@@ -767,6 +820,29 @@ void intel_dsc_dp_pps_write(struct intel_encoder *encoder,
sizeof(dp_dsc_pps_sdp));
}
+void intel_dsc_su_et_parameters_configure(struct intel_dsb *dsb, struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state, int su_lines)
+{
+ struct intel_display *display = to_intel_display(crtc_state);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ const struct drm_dsc_config *vdsc_cfg = &crtc_state->dsc.config;
+ enum pipe pipe = crtc->pipe;
+ int vdsc_instances_per_pipe = intel_dsc_get_vdsc_per_pipe(crtc_state);
+ int slice_row_per_frame = su_lines / vdsc_cfg->slice_height;
+ u32 val;
+
+ drm_WARN_ON_ONCE(display->drm, su_lines % vdsc_cfg->slice_height);
+ drm_WARN_ON_ONCE(display->drm, vdsc_instances_per_pipe > 2);
+
+ val = DSC_SUPS0_SU_SLICE_ROW_PER_FRAME(slice_row_per_frame);
+ val |= DSC_SUPS0_SU_PIC_HEIGHT(su_lines);
+
+ intel_de_write_dsb(display, dsb, LNL_DSC0_SU_PARAMETER_SET_0(pipe), val);
+
+ if (vdsc_instances_per_pipe == 2)
+ intel_de_write_dsb(display, dsb, LNL_DSC1_SU_PARAMETER_SET_0(pipe), val);
+}
+
static i915_reg_t dss_ctl1_reg(struct intel_crtc *crtc, enum transcoder cpu_transcoder)
{
return is_pipe_dsc(crtc, cpu_transcoder) ?
@@ -1018,12 +1094,13 @@ void intel_dsc_get_config(struct intel_crtc_state *crtc_state)
if (!crtc_state->dsc.compression_enable)
goto out;
+ /* TODO: Read out slice_config.pipes_per_line/slices_per_stream as well */
if (dss_ctl1 & JOINER_ENABLE && dss_ctl2 & (VDSC2_ENABLE | SMALL_JOINER_CONFIG_3_ENGINES))
- crtc_state->dsc.num_streams = 3;
+ crtc_state->dsc.slice_config.streams_per_pipe = 3;
else if (dss_ctl1 & JOINER_ENABLE && dss_ctl2 & VDSC1_ENABLE)
- crtc_state->dsc.num_streams = 2;
+ crtc_state->dsc.slice_config.streams_per_pipe = 2;
else
- crtc_state->dsc.num_streams = 1;
+ crtc_state->dsc.slice_config.streams_per_pipe = 1;
intel_dsc_get_pps_config(crtc_state);
out:
@@ -1036,8 +1113,8 @@ static void intel_vdsc_dump_state(struct drm_printer *p, int indent,
drm_printf_indent(p, indent,
"dsc-dss: compressed-bpp:" FXP_Q4_FMT ", slice-count: %d, num_streams: %d\n",
FXP_Q4_ARGS(crtc_state->dsc.compressed_bpp_x16),
- crtc_state->dsc.slice_count,
- crtc_state->dsc.num_streams);
+ intel_dsc_line_slice_count(&crtc_state->dsc.slice_config),
+ crtc_state->dsc.slice_config.streams_per_pipe);
}
void intel_vdsc_state_dump(struct drm_printer *p, int indent,
@@ -1050,7 +1127,6 @@ void intel_vdsc_state_dump(struct drm_printer *p, int indent,
drm_dsc_dump_config(p, indent, &crtc_state->dsc.config);
}
-static
int intel_dsc_get_pixel_rate_with_dsc_bubbles(struct intel_display *display,
int pixel_rate, int htotal,
int dsc_horizontal_slices)
@@ -1072,7 +1148,7 @@ int intel_vdsc_min_cdclk(const struct intel_crtc_state *crtc_state)
struct intel_display *display = to_intel_display(crtc_state);
int num_vdsc_instances = intel_dsc_get_num_vdsc_instances(crtc_state);
int htotal = crtc_state->hw.adjusted_mode.crtc_htotal;
- int dsc_slices = crtc_state->dsc.slice_count;
+ int dsc_slices = intel_dsc_line_slice_count(&crtc_state->dsc.slice_config);
int pixel_rate;
int min_cdclk;
diff --git a/drivers/gpu/drm/i915/display/intel_vdsc.h b/drivers/gpu/drm/i915/display/intel_vdsc.h
index 99f64ac54b27..3372f8694054 100644
--- a/drivers/gpu/drm/i915/display/intel_vdsc.h
+++ b/drivers/gpu/drm/i915/display/intel_vdsc.h
@@ -13,9 +13,16 @@ struct drm_printer;
enum transcoder;
struct intel_crtc;
struct intel_crtc_state;
+struct intel_display;
+struct intel_dsb;
+struct intel_dsc_slice_config;
struct intel_encoder;
bool intel_dsc_source_support(const struct intel_crtc_state *crtc_state);
+int intel_dsc_line_slice_count(const struct intel_dsc_slice_config *config);
+bool intel_dsc_get_slice_config(struct intel_display *display,
+ int num_joined_pipes, int slice_per_pipe,
+ struct intel_dsc_slice_config *config);
void intel_uncompressed_joiner_enable(const struct intel_crtc_state *crtc_state);
void intel_dsc_enable(const struct intel_crtc_state *crtc_state);
void intel_dsc_disable(const struct intel_crtc_state *crtc_state);
@@ -31,9 +38,14 @@ void intel_dsc_dsi_pps_write(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state);
void intel_dsc_dp_pps_write(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state);
+void intel_dsc_su_et_parameters_configure(struct intel_dsb *dsb, struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state, int su_lines);
void intel_vdsc_state_dump(struct drm_printer *p, int indent,
const struct intel_crtc_state *crtc_state);
int intel_vdsc_min_cdclk(const struct intel_crtc_state *crtc_state);
unsigned int intel_vdsc_prefill_lines(const struct intel_crtc_state *crtc_state);
+int intel_dsc_get_pixel_rate_with_dsc_bubbles(struct intel_display *display,
+ int pixel_rate, int htotal,
+ int dsc_horizontal_slices);
#endif /* __INTEL_VDSC_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_vdsc_regs.h b/drivers/gpu/drm/i915/display/intel_vdsc_regs.h
index 2d478a84b07c..2b2e3c1b8138 100644
--- a/drivers/gpu/drm/i915/display/intel_vdsc_regs.h
+++ b/drivers/gpu/drm/i915/display/intel_vdsc_regs.h
@@ -196,6 +196,18 @@
#define DSC_PPS18_NSL_BPG_OFFSET(offset) REG_FIELD_PREP(DSC_PPS18_NSL_BPG_OFFSET_MASK, offset)
#define DSC_PPS18_SL_OFFSET_ADJ(offset) REG_FIELD_PREP(DSC_PPS18_SL_OFFSET_ADJ_MASK, offset)
+#define _LNL_DSC0_SU_PARAMETER_SET_0_PA 0x78064
+#define _LNL_DSC1_SU_PARAMETER_SET_0_PA 0x78164
+#define _LNL_DSC0_SU_PARAMETER_SET_0_PB 0x78264
+#define _LNL_DSC1_SU_PARAMETER_SET_0_PB 0x78364
+#define LNL_DSC0_SU_PARAMETER_SET_0(pipe) _MMIO_PIPE((pipe), _LNL_DSC0_SU_PARAMETER_SET_0_PA, _LNL_DSC0_SU_PARAMETER_SET_0_PB)
+#define LNL_DSC1_SU_PARAMETER_SET_0(pipe) _MMIO_PIPE((pipe), _LNL_DSC1_SU_PARAMETER_SET_0_PA, _LNL_DSC1_SU_PARAMETER_SET_0_PB)
+
+#define DSC_SUPS0_SU_SLICE_ROW_PER_FRAME_MASK REG_GENMASK(31, 20)
+#define DSC_SUPS0_SU_SLICE_ROW_PER_FRAME(rows) REG_FIELD_PREP(DSC_SUPS0_SU_SLICE_ROW_PER_FRAME_MASK, (rows))
+#define DSC_SUPS0_SU_PIC_HEIGHT_MASK REG_GENMASK(15, 0)
+#define DSC_SUPS0_SU_PIC_HEIGHT(h) REG_FIELD_PREP(DSC_SUPS0_SU_PIC_HEIGHT_MASK, (h))
+
/* Icelake Rate Control Buffer Threshold Registers */
#define DSCA_RC_BUF_THRESH_0 _MMIO(0x6B230)
#define DSCA_RC_BUF_THRESH_0_UDW _MMIO(0x6B230 + 4)
diff --git a/drivers/gpu/drm/i915/display/intel_vga.c b/drivers/gpu/drm/i915/display/intel_vga.c
index c45c4bbc3f95..9832a4ade318 100644
--- a/drivers/gpu/drm/i915/display/intel_vga.c
+++ b/drivers/gpu/drm/i915/display/intel_vga.c
@@ -18,6 +18,23 @@
#include "intel_vga.h"
#include "intel_vga_regs.h"
+static unsigned int intel_gmch_ctrl_reg(struct intel_display *display)
+{
+ return DISPLAY_VER(display) >= 6 ? SNB_GMCH_CTRL : I830_GMCH_CTRL;
+}
+
+static bool intel_vga_decode_is_enabled(struct intel_display *display)
+{
+ struct pci_dev *pdev = to_pci_dev(display->drm->dev);
+ u16 gmch_ctrl = 0;
+
+ if (pci_bus_read_config_word(pdev->bus, PCI_DEVFN(0, 0),
+ intel_gmch_ctrl_reg(display), &gmch_ctrl))
+ return false;
+
+ return !(gmch_ctrl & INTEL_GMCH_VGA_DISABLE);
+}
+
static i915_reg_t intel_vga_cntrl_reg(struct intel_display *display)
{
if (display->platform.valleyview || display->platform.cherryview)
@@ -41,101 +58,266 @@ static bool has_vga_pipe_sel(struct intel_display *display)
return DISPLAY_VER(display) < 7;
}
-/* Disable the VGA plane that we never use */
-void intel_vga_disable(struct intel_display *display)
+static bool has_vga_mmio_access(struct intel_display *display)
{
- struct pci_dev *pdev = to_pci_dev(display->drm->dev);
- i915_reg_t vga_reg = intel_vga_cntrl_reg(display);
- enum pipe pipe;
- u32 tmp;
- u8 sr1;
+ /* WaEnableVGAAccessThroughIOPort:ctg+ */
+ return DISPLAY_VER(display) < 5 && !display->platform.g4x;
+}
- tmp = intel_de_read(display, vga_reg);
- if (tmp & VGA_DISP_DISABLE)
- return;
+static bool intel_pci_has_vga_io_decode(struct pci_dev *pdev)
+{
+ u16 cmd = 0;
- if (display->platform.cherryview)
- pipe = REG_FIELD_GET(VGA_PIPE_SEL_MASK_CHV, tmp);
- else if (has_vga_pipe_sel(display))
- pipe = REG_FIELD_GET(VGA_PIPE_SEL_MASK, tmp);
- else
- pipe = PIPE_A;
+ pci_read_config_word(pdev, PCI_COMMAND, &cmd);
+ if ((cmd & PCI_COMMAND_IO) == 0)
+ return false;
- drm_dbg_kms(display->drm, "Disabling VGA plane on pipe %c\n",
- pipe_name(pipe));
+ pdev = pdev->bus->self;
+ while (pdev) {
+ u16 ctl = 0;
- /* WaEnableVGAAccessThroughIOPort:ctg,elk,ilk,snb,ivb,vlv,hsw */
- vga_get_uninterruptible(pdev, VGA_RSRC_LEGACY_IO);
- outb(0x01, VGA_SEQ_I);
- sr1 = inb(VGA_SEQ_D);
- outb(sr1 | VGA_SR01_SCREEN_OFF, VGA_SEQ_D);
- vga_put(pdev, VGA_RSRC_LEGACY_IO);
- udelay(300);
+ pci_read_config_word(pdev, PCI_BRIDGE_CONTROL, &ctl);
+ if ((ctl & PCI_BRIDGE_CTL_VGA) == 0)
+ return false;
- intel_de_write(display, vga_reg, VGA_DISP_DISABLE);
- intel_de_posting_read(display, vga_reg);
+ pdev = pdev->bus->self;
+ }
+
+ return true;
+}
+
+static bool intel_pci_set_io_decode(struct pci_dev *pdev, bool enable)
+{
+ u16 old = 0, cmd;
+
+ pci_read_config_word(pdev, PCI_COMMAND, &old);
+ cmd = old & ~PCI_COMMAND_IO;
+ if (enable)
+ cmd |= PCI_COMMAND_IO;
+ pci_write_config_word(pdev, PCI_COMMAND, cmd);
+
+ return old & PCI_COMMAND_IO;
}
-void intel_vga_reset_io_mem(struct intel_display *display)
+static bool intel_pci_bridge_set_vga(struct pci_dev *pdev, bool enable)
+{
+ u16 old = 0, ctl;
+
+ pci_read_config_word(pdev->bus->self, PCI_BRIDGE_CONTROL, &old);
+ ctl = old & ~PCI_BRIDGE_CTL_VGA;
+ if (enable)
+ ctl |= PCI_BRIDGE_CTL_VGA;
+ pci_write_config_word(pdev->bus->self, PCI_BRIDGE_CONTROL, ctl);
+
+ return old & PCI_BRIDGE_CTL_VGA;
+}
+
+static int intel_vga_get(struct intel_display *display, bool mmio,
+ bool *old_io_decode)
{
struct pci_dev *pdev = to_pci_dev(display->drm->dev);
+ int err;
+
+ if (mmio) {
+ *old_io_decode = false;
+ return 0;
+ }
/*
- * After we re-enable the power well, if we touch VGA register 0x3d5
- * we'll get unclaimed register interrupts. This stops after we write
- * anything to the VGA MSR register. The vgacon module uses this
- * register all the time, so if we unbind our driver and, as a
- * consequence, bind vgacon, we'll get stuck in an infinite loop at
- * console_unlock(). So make here we touch the VGA MSR register, making
- * sure vgacon can keep working normally without triggering interrupts
- * and error messages.
+ * Bypass the VGA arbiter on the iGPU and just enable
+ * IO decode by hand. This avoids clobbering the VGA
+ * routing for an external GPU when it's the current
+ * VGA device, and thus prevents the all 0xff/white
+ * readout from VGA memory when taking over from vgacon.
+ *
+ * The iGPU has the highest VGA decode priority so it will
+ * grab any VGA IO access when IO decode is enabled, regardless
+ * of how any other VGA routing bits are configured.
*/
- vga_get_uninterruptible(pdev, VGA_RSRC_LEGACY_IO);
- outb(inb(VGA_MIS_R), VGA_MIS_W);
- vga_put(pdev, VGA_RSRC_LEGACY_IO);
+ if (display->platform.dgfx) {
+ err = vga_get_uninterruptible(pdev, VGA_RSRC_LEGACY_IO);
+ if (err)
+ return err;
+ }
+
+ *old_io_decode = intel_pci_set_io_decode(pdev, true);
+
+ return 0;
}
-static int intel_gmch_vga_set_state(struct intel_display *display, bool enable_decode)
+static void intel_vga_put(struct intel_display *display, bool io_decode, bool mmio)
{
struct pci_dev *pdev = to_pci_dev(display->drm->dev);
- unsigned int reg = DISPLAY_VER(display) >= 6 ? SNB_GMCH_CTRL : INTEL_GMCH_CTRL;
- u16 gmch_ctrl;
- if (pci_bus_read_config_word(pdev->bus, PCI_DEVFN(0, 0), reg, &gmch_ctrl)) {
- drm_err(display->drm, "failed to read control word\n");
- return -EIO;
- }
+ if (mmio)
+ return;
- if (!!(gmch_ctrl & INTEL_GMCH_VGA_DISABLE) == !enable_decode)
- return 0;
+ /* see intel_vga_get() */
+ intel_pci_set_io_decode(pdev, io_decode);
+
+ if (display->platform.dgfx)
+ vga_put(pdev, VGA_RSRC_LEGACY_IO);
+}
- if (enable_decode)
- gmch_ctrl &= ~INTEL_GMCH_VGA_DISABLE;
+u8 intel_vga_read(struct intel_display *display, u16 reg, bool mmio)
+{
+ if (mmio)
+ return intel_de_read8(display, _MMIO(reg));
else
- gmch_ctrl |= INTEL_GMCH_VGA_DISABLE;
+ return inb(reg);
+}
+
+static void intel_vga_write(struct intel_display *display, u16 reg, u8 val, bool mmio)
+{
+ if (mmio)
+ intel_de_write8(display, _MMIO(reg), val);
+ else
+ outb(val, reg);
+}
+
+/* Disable the VGA plane that we never use */
+void intel_vga_disable(struct intel_display *display)
+{
+ struct pci_dev *pdev = to_pci_dev(display->drm->dev);
+ i915_reg_t vga_reg = intel_vga_cntrl_reg(display);
+ bool mmio = has_vga_mmio_access(display);
+ bool io_decode;
+ u8 msr, sr1;
+ u32 tmp;
+ int err;
- if (pci_bus_write_config_word(pdev->bus, PCI_DEVFN(0, 0), reg, gmch_ctrl)) {
- drm_err(display->drm, "failed to write control word\n");
- return -EIO;
+ if (!intel_vga_decode_is_enabled(display)) {
+ drm_dbg_kms(display->drm, "VGA decode is disabled\n");
+
+ /*
+ * On older hardware VGA_DISP_DISABLE defaults to 0, but
+ * it *must* be set or else the pipe will be completely
+ * stuck (at least on g4x).
+ */
+ goto reset_vgacntr;
}
- return 0;
+ tmp = intel_de_read(display, vga_reg);
+
+ if ((tmp & VGA_DISP_DISABLE) == 0) {
+ enum pipe pipe;
+
+ if (display->platform.cherryview)
+ pipe = REG_FIELD_GET(VGA_PIPE_SEL_MASK_CHV, tmp);
+ else if (has_vga_pipe_sel(display))
+ pipe = REG_FIELD_GET(VGA_PIPE_SEL_MASK, tmp);
+ else
+ pipe = PIPE_A;
+
+ drm_dbg_kms(display->drm, "Disabling VGA plane on pipe %c\n",
+ pipe_name(pipe));
+ } else {
+ drm_dbg_kms(display->drm, "VGA plane is disabled\n");
+
+ /*
+ * Unfortunately at least some BIOSes (eg. HSW Lenovo
+ * ThinkCentre E73) set up the VGA registers even when
+ * in UEFI mode with the VGA plane disabled. So we need to
+ * always clean up the mess for iGPUs. For discrete GPUs we
+ * don't really care about the state of the VGA registers
+ * since all VGA accesses can be blocked via the bridge.
+ */
+ if (display->platform.dgfx)
+ goto reset_vgacntr;
+ }
+
+ /*
+ * This should not fail, because the vga_get() family of functions
+ * will only report errors for dGPUs that are unreachable via the
+ * bridge, and cannot be made reachable either. We shouldn't even
+ * get here for this case, but if we do, we assume that the bridge
+ * will also refuse future requests to forward VGA accesses.
+ */
+ err = intel_vga_get(display, mmio, &io_decode);
+ if (err)
+ goto reset_vgacntr;
+
+ drm_WARN_ON(display->drm, !mmio && !intel_pci_has_vga_io_decode(pdev));
+
+ intel_vga_write(display, VGA_SEQ_I, 0x01, mmio);
+ sr1 = intel_vga_read(display, VGA_SEQ_D, mmio);
+ sr1 |= VGA_SR01_SCREEN_OFF;
+ intel_vga_write(display, VGA_SEQ_D, sr1, mmio);
+
+ msr = intel_vga_read(display, VGA_MIS_R, mmio);
+ /*
+ * Always disable VGA memory decode for iGPU so that
+ * intel_vga_set_decode() doesn't need to access VGA registers.
+ * VGA_MIS_ENB_MEM_ACCESS=0 is also the reset value.
+ */
+ msr &= ~VGA_MIS_ENB_MEM_ACCESS;
+ /*
+ * VGA_MIS_COLOR controls both GPU level and display engine level
+ * MDA vs. CGA decode logic. But when the register gets reset
+ * (reset value has VGA_MIS_COLOR=0) by the power well, only the
+ * display engine level decode logic gets notified.
+ *
+ * Switch to MDA mode to make sure the GPU level decode logic will
+ * be in sync with the display engine level decode logic after the
+ * power well has been reset. Otherwise the GPU will claim CGA
+ * register accesses but the display engine will not, causing
+ * RMbus NoClaim errors.
+ */
+ msr &= ~VGA_MIS_COLOR;
+ intel_vga_write(display, VGA_MIS_W, msr, mmio);
+
+ intel_vga_put(display, io_decode, mmio);
+
+ /*
+ * Inform the arbiter about VGA memory decode being disabled so
+ * that it doesn't disable all memory decode for the iGPU when
+ * targeting another GPU.
+ */
+ if (!display->platform.dgfx)
+ vga_set_legacy_decoding(pdev, VGA_RSRC_LEGACY_IO);
+
+ udelay(300);
+
+reset_vgacntr:
+ intel_de_write(display, vga_reg, VGA_DISP_DISABLE);
+ intel_de_posting_read(display, vga_reg);
}
-static unsigned int intel_gmch_vga_set_decode(struct pci_dev *pdev, bool enable_decode)
+static unsigned int intel_vga_set_decode(struct pci_dev *pdev, bool enable_decode)
{
struct intel_display *display = to_intel_display(pdev);
+ unsigned int decodes = VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
- intel_gmch_vga_set_state(display, enable_decode);
+ drm_dbg_kms(display->drm, "%s VGA decode due to VGA arbitration\n",
+ str_enable_disable(enable_decode));
- if (enable_decode)
- return VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM |
- VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
- else
- return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
+ /*
+ * Can't use GMCH_CTRL INTEL_GMCH_VGA_DISABLE to disable VGA
+ * decode on ILK+ since the register is locked. Instead
+ * intel_disable_vga() will disable VGA memory decode for the
+ * iGPU, and here we just need to take care of the IO decode.
+ * For discrete GPUs we rely on the bridge VGA control.
+ *
+ * We can't disable IO decode already in intel_vga_disable()
+ * because at least some laptops (eg. CTG Dell Latitude E5400)
+ * will hang during reboot/shutdown with IO decode disabled.
+ */
+ if (display->platform.dgfx) {
+ if (!enable_decode)
+ intel_pci_bridge_set_vga(pdev, false);
+ else
+ decodes |= VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM;
+ } else {
+ if (!enable_decode)
+ intel_pci_set_io_decode(pdev, false);
+ else
+ decodes |= VGA_RSRC_LEGACY_IO;
+ }
+
+ return decodes;
}
-int intel_vga_register(struct intel_display *display)
+void intel_vga_register(struct intel_display *display)
{
struct pci_dev *pdev = to_pci_dev(display->drm->dev);
@@ -149,11 +331,8 @@ int intel_vga_register(struct intel_display *display)
* then we do not take part in VGA arbitration and the
* vga_client_register() fails with -ENODEV.
*/
- ret = vga_client_register(pdev, intel_gmch_vga_set_decode);
- if (ret && ret != -ENODEV)
- return ret;
-
- return 0;
+ ret = vga_client_register(pdev, intel_vga_set_decode);
+ drm_WARN_ON(display->drm, ret && ret != -ENODEV);
}
void intel_vga_unregister(struct intel_display *display)
diff --git a/drivers/gpu/drm/i915/display/intel_vga.h b/drivers/gpu/drm/i915/display/intel_vga.h
index 16d699f3b641..72131cb536cd 100644
--- a/drivers/gpu/drm/i915/display/intel_vga.h
+++ b/drivers/gpu/drm/i915/display/intel_vga.h
@@ -6,11 +6,14 @@
#ifndef __INTEL_VGA_H__
#define __INTEL_VGA_H__
+#include <linux/types.h>
+
struct intel_display;
+u8 intel_vga_read(struct intel_display *display, u16 reg, bool mmio);
void intel_vga_reset_io_mem(struct intel_display *display);
void intel_vga_disable(struct intel_display *display);
-int intel_vga_register(struct intel_display *display);
+void intel_vga_register(struct intel_display *display);
void intel_vga_unregister(struct intel_display *display);
#endif /* __INTEL_VGA_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_vrr.c b/drivers/gpu/drm/i915/display/intel_vrr.c
index db74744ddb31..8a957804cb97 100644
--- a/drivers/gpu/drm/i915/display/intel_vrr.c
+++ b/drivers/gpu/drm/i915/display/intel_vrr.c
@@ -6,6 +6,7 @@
#include <drm/drm_print.h>
+#include "intel_alpm.h"
#include "intel_crtc.h"
#include "intel_de.h"
#include "intel_display_regs.h"
@@ -520,6 +521,7 @@ int intel_vrr_compute_optimized_guardband(struct intel_crtc_state *crtc_state)
if (intel_crtc_has_dp_encoder(crtc_state)) {
guardband = max(guardband, intel_psr_min_guardband(crtc_state));
guardband = max(guardband, intel_dp_sdp_min_guardband(crtc_state, true));
+ guardband = max(guardband, intel_alpm_lobf_min_guardband(crtc_state));
}
return guardband;
@@ -598,6 +600,18 @@ void intel_vrr_set_transcoder_timings(const struct intel_crtc_state *crtc_state)
return;
/*
+ * Bspec says:
+ * "(note: VRR needs to be programmed after
+ * TRANS_DDI_FUNC_CTL and before TRANS_CONF)."
+ *
+ * In practice it turns out that ICL can hang if
+ * TRANS_VRR_VMAX/FLIPLINE are written before
+ * enabling TRANS_DDI_FUNC_CTL.
+ */
+ drm_WARN_ON(display->drm,
+ !(intel_de_read(display, TRANS_DDI_FUNC_CTL(display, cpu_transcoder)) & TRANS_DDI_FUNC_ENABLE));
+
+ /*
* This bit seems to have two meanings depending on the platform:
* TGL: generate VRR "safe window" for DSB vblank waits
* ADL/DG2: make TRANS_SET_CONTEXT_LATENCY effective with VRR
@@ -676,13 +690,32 @@ intel_vrr_dcb_reset(const struct intel_crtc_state *old_crtc_state,
intel_de_write(display, PIPEDMC_DCB_BALANCE_RESET(pipe), 0);
}
+static u32 trans_vrr_push(const struct intel_crtc_state *crtc_state,
+ bool send_push)
+{
+ struct intel_display *display = to_intel_display(crtc_state);
+ u32 trans_vrr_push = 0;
+
+ if (intel_vrr_always_use_vrr_tg(display) ||
+ crtc_state->vrr.enable)
+ trans_vrr_push |= TRANS_PUSH_EN;
+
+ if (send_push)
+ trans_vrr_push |= TRANS_PUSH_SEND;
+
+ if (HAS_PSR_TRANS_PUSH_FRAME_CHANGE(display))
+ trans_vrr_push |= LNL_TRANS_PUSH_PSR_PR_EN;
+
+ return trans_vrr_push;
+}
+
void intel_vrr_send_push(struct intel_dsb *dsb,
const struct intel_crtc_state *crtc_state)
{
struct intel_display *display = to_intel_display(crtc_state);
enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
- if (!crtc_state->vrr.enable)
+ if (!crtc_state->vrr.enable && !intel_psr_use_trans_push(crtc_state))
return;
if (dsb)
@@ -690,8 +723,7 @@ void intel_vrr_send_push(struct intel_dsb *dsb,
intel_de_write_dsb(display, dsb,
TRANS_PUSH(display, cpu_transcoder),
- TRANS_PUSH_EN | TRANS_PUSH_SEND);
-
+ trans_vrr_push(crtc_state, true));
if (dsb)
intel_dsb_nonpost_end(dsb);
}
@@ -876,7 +908,8 @@ static void intel_vrr_tg_enable(const struct intel_crtc_state *crtc_state,
enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
u32 vrr_ctl;
- intel_de_write(display, TRANS_PUSH(display, cpu_transcoder), TRANS_PUSH_EN);
+ intel_de_write(display, TRANS_PUSH(display, cpu_transcoder),
+ trans_vrr_push(crtc_state, false));
vrr_ctl = VRR_CTL_VRR_ENABLE | trans_vrr_ctl(crtc_state);
@@ -904,7 +937,8 @@ static void intel_vrr_tg_disable(const struct intel_crtc_state *old_crtc_state)
VRR_STATUS_VRR_EN_LIVE, 1000))
drm_err(display->drm, "Timed out waiting for VRR live status to clear\n");
- intel_de_write(display, TRANS_PUSH(display, cpu_transcoder), 0);
+ intel_de_rmw(display, TRANS_PUSH(display, cpu_transcoder),
+ TRANS_PUSH_EN, 0);
}
void intel_vrr_enable(const struct intel_crtc_state *crtc_state)
@@ -939,6 +973,8 @@ void intel_vrr_transcoder_enable(const struct intel_crtc_state *crtc_state)
{
struct intel_display *display = to_intel_display(crtc_state);
+ intel_vrr_set_transcoder_timings(crtc_state);
+
if (!intel_vrr_possible(crtc_state))
return;
@@ -957,6 +993,15 @@ void intel_vrr_transcoder_disable(const struct intel_crtc_state *old_crtc_state)
intel_vrr_tg_disable(old_crtc_state);
}
+void intel_vrr_psr_frame_change_enable(const struct intel_crtc_state *crtc_state)
+{
+ struct intel_display *display = to_intel_display(crtc_state);
+ enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
+
+ intel_de_write(display, TRANS_PUSH(display, cpu_transcoder),
+ trans_vrr_push(crtc_state, false));
+}
+
bool intel_vrr_is_fixed_rr(const struct intel_crtc_state *crtc_state)
{
return crtc_state->vrr.flipline &&
diff --git a/drivers/gpu/drm/i915/display/intel_vrr.h b/drivers/gpu/drm/i915/display/intel_vrr.h
index bedcc8c4bff2..4f16ca4af91f 100644
--- a/drivers/gpu/drm/i915/display/intel_vrr.h
+++ b/drivers/gpu/drm/i915/display/intel_vrr.h
@@ -33,6 +33,7 @@ void intel_vrr_dcb_increment_flip_count(struct intel_crtc_state *crtc_state,
struct intel_crtc *crtc);
bool intel_vrr_is_push_sent(const struct intel_crtc_state *crtc_state);
void intel_vrr_disable(const struct intel_crtc_state *old_crtc_state);
+void intel_vrr_psr_frame_change_enable(const struct intel_crtc_state *crtc_state);
void intel_vrr_get_config(struct intel_crtc_state *crtc_state);
int intel_vrr_vmax_vtotal(const struct intel_crtc_state *crtc_state);
int intel_vrr_vmin_vtotal(const struct intel_crtc_state *crtc_state);
diff --git a/drivers/gpu/drm/i915/display/intel_vrr_regs.h b/drivers/gpu/drm/i915/display/intel_vrr_regs.h
index 427ada0d3973..9d4d6573a149 100644
--- a/drivers/gpu/drm/i915/display/intel_vrr_regs.h
+++ b/drivers/gpu/drm/i915/display/intel_vrr_regs.h
@@ -165,6 +165,7 @@
#define TRANS_PUSH(display, trans) _MMIO_TRANS2((display), (trans), _TRANS_PUSH_A)
#define TRANS_PUSH_EN REG_BIT(31)
#define TRANS_PUSH_SEND REG_BIT(30)
+#define LNL_TRANS_PUSH_PSR_PR_EN REG_BIT(16)
#define _TRANS_VRR_VSYNC_A 0x60078
#define TRANS_VRR_VSYNC(display, trans) _MMIO_TRANS2((display), (trans), _TRANS_VRR_VSYNC_A)
diff --git a/drivers/gpu/drm/i915/display/skl_scaler.c b/drivers/gpu/drm/i915/display/skl_scaler.c
index 4c4deac7f9c8..7c5cb188ebf0 100644
--- a/drivers/gpu/drm/i915/display/skl_scaler.c
+++ b/drivers/gpu/drm/i915/display/skl_scaler.c
@@ -823,7 +823,7 @@ void skl_pfit_enable(const struct intel_crtc_state *crtc_state)
crtc_state->scaler_state.scaler_id < 0))
return;
- if (intel_display_wa(display, 14011503117))
+ if (intel_display_wa(display, INTEL_DISPLAY_WA_14011503117))
adl_scaler_ecc_mask(crtc_state);
drm_rect_init(&src, 0, 0,
diff --git a/drivers/gpu/drm/i915/display/skl_universal_plane.c b/drivers/gpu/drm/i915/display/skl_universal_plane.c
index 746e942cafd2..11ba42c67e3e 100644
--- a/drivers/gpu/drm/i915/display/skl_universal_plane.c
+++ b/drivers/gpu/drm/i915/display/skl_universal_plane.c
@@ -17,7 +17,7 @@
#include "intel_display_regs.h"
#include "intel_display_types.h"
#include "intel_display_utils.h"
-#include "intel_dpt.h"
+#include "intel_display_wa.h"
#include "intel_fb.h"
#include "intel_fbc.h"
#include "intel_frontbuffer.h"
@@ -1217,7 +1217,7 @@ static u32 skl_plane_ctl(const struct intel_plane_state *plane_state)
plane_ctl |= PLANE_CTL_KEY_ENABLE_SOURCE;
/* Wa_22012358565:adl-p */
- if (DISPLAY_VER(display) == 13)
+ if (intel_display_wa(display, INTEL_DISPLAY_WA_22012358565))
plane_ctl |= adlp_plane_ctl_arb_slots(plane_state);
return plane_ctl;
@@ -2793,8 +2793,7 @@ static bool tgl_plane_has_mc_ccs(struct intel_display *display,
enum plane_id plane_id)
{
/* Wa_14010477008 */
- if (display->platform.dg1 || display->platform.rocketlake ||
- (display->platform.tigerlake && IS_DISPLAY_STEP(display, STEP_A0, STEP_D0)))
+ if (intel_display_wa(display, INTEL_DISPLAY_WA_14010477008))
return false;
return plane_id < PLANE_6;
diff --git a/drivers/gpu/drm/i915/display/skl_watermark.c b/drivers/gpu/drm/i915/display/skl_watermark.c
index f5a6fae815d1..b1f9546b8cda 100644
--- a/drivers/gpu/drm/i915/display/skl_watermark.c
+++ b/drivers/gpu/drm/i915/display/skl_watermark.c
@@ -7,8 +7,8 @@
#include <drm/drm_blend.h>
#include <drm/drm_print.h>
+#include <drm/intel/intel_pcode_regs.h>
-#include "i915_reg.h"
#include "i9xx_wm.h"
#include "intel_atomic.h"
#include "intel_bw.h"
@@ -22,11 +22,12 @@
#include "intel_display_rpm.h"
#include "intel_display_types.h"
#include "intel_display_utils.h"
+#include "intel_display_wa.h"
#include "intel_dram.h"
#include "intel_fb.h"
#include "intel_fixed.h"
#include "intel_flipq.h"
-#include "intel_pcode.h"
+#include "intel_parent.h"
#include "intel_plane.h"
#include "intel_vblank.h"
#include "intel_wm.h"
@@ -115,9 +116,8 @@ intel_sagv_block_time(struct intel_display *display)
u32 val = 0;
int ret;
- ret = intel_pcode_read(display->drm,
- GEN12_PCODE_READ_SAGV_BLOCK_TIME_US,
- &val, NULL);
+ ret = intel_parent_pcode_read(display, GEN12_PCODE_READ_SAGV_BLOCK_TIME_US,
+ &val, NULL);
if (ret) {
drm_dbg_kms(display->drm, "Couldn't read SAGV block time!\n");
return 0;
@@ -184,8 +184,8 @@ static void skl_sagv_enable(struct intel_display *display)
return;
drm_dbg_kms(display->drm, "Enabling SAGV\n");
- ret = intel_pcode_write(display->drm, GEN9_PCODE_SAGV_CONTROL,
- GEN9_SAGV_ENABLE);
+ ret = intel_parent_pcode_write(display, GEN9_PCODE_SAGV_CONTROL,
+ GEN9_SAGV_ENABLE);
/* We don't need to wait for SAGV when enabling */
@@ -217,9 +217,9 @@ static void skl_sagv_disable(struct intel_display *display)
drm_dbg_kms(display->drm, "Disabling SAGV\n");
/* bspec says to keep retrying for at least 1 ms */
- ret = intel_pcode_request(display->drm, GEN9_PCODE_SAGV_CONTROL,
- GEN9_SAGV_DISABLE,
- GEN9_SAGV_IS_DISABLED, GEN9_SAGV_IS_DISABLED, 1);
+ ret = intel_parent_pcode_request(display, GEN9_PCODE_SAGV_CONTROL,
+ GEN9_SAGV_DISABLE,
+ GEN9_SAGV_IS_DISABLED, GEN9_SAGV_IS_DISABLED, 1);
/*
* Some skl systems, pre-release machines in particular,
* don't actually have SAGV.
@@ -3283,7 +3283,7 @@ static void skl_read_wm_latency(struct intel_display *display)
/* read the first set of memory latencies[0:3] */
val = 0; /* data0 to be programmed to 0 for first set */
- ret = intel_pcode_read(display->drm, GEN9_PCODE_READ_MEM_LATENCY, &val, NULL);
+ ret = intel_parent_pcode_read(display, GEN9_PCODE_READ_MEM_LATENCY, &val, NULL);
if (ret) {
drm_err(display->drm, "SKL Mailbox read error = %d\n", ret);
return;
@@ -3296,7 +3296,7 @@ static void skl_read_wm_latency(struct intel_display *display)
/* read the second set of memory latencies[4:7] */
val = 1; /* data0 to be programmed to 1 for second set */
- ret = intel_pcode_read(display->drm, GEN9_PCODE_READ_MEM_LATENCY, &val, NULL);
+ ret = intel_parent_pcode_read(display, GEN9_PCODE_READ_MEM_LATENCY, &val, NULL);
if (ret) {
drm_err(display->drm, "SKL Mailbox read error = %d\n", ret);
return;
@@ -3413,7 +3413,7 @@ static u32 pipe_mbus_dbox_ctl(const struct intel_crtc *crtc,
if (DISPLAY_VER(display) >= 14)
val |= dbuf_state->joined_mbus ?
MBUS_DBOX_A_CREDIT(12) : MBUS_DBOX_A_CREDIT(8);
- else if (display->platform.alderlake_p)
+ else if (intel_display_wa(display, INTEL_DISPLAY_WA_22010947358))
/* Wa_22010947358:adl-p */
val |= dbuf_state->joined_mbus ?
MBUS_DBOX_A_CREDIT(6) : MBUS_DBOX_A_CREDIT(4);
diff --git a/drivers/gpu/drm/i915/display/vlv_dsi.c b/drivers/gpu/drm/i915/display/vlv_dsi.c
index 60857d2afdb1..36591d724638 100644
--- a/drivers/gpu/drm/i915/display/vlv_dsi.c
+++ b/drivers/gpu/drm/i915/display/vlv_dsi.c
@@ -33,7 +33,6 @@
#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
-#include "i915_reg.h"
#include "intel_atomic.h"
#include "intel_backlight.h"
#include "intel_connector.h"
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_clflush.c b/drivers/gpu/drm/i915/gem/i915_gem_clflush.c
index 30cc08583cbd..7782ba44fabd 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_clflush.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_clflush.c
@@ -22,7 +22,7 @@ static void __do_clflush(struct drm_i915_gem_object *obj)
GEM_BUG_ON(!i915_gem_object_has_pages(obj));
drm_clflush_sg(obj->mm.pages);
- i915_gem_object_flush_frontbuffer(obj, ORIGIN_CPU);
+ i915_gem_object_frontbuffer_flush(obj, ORIGIN_CPU);
}
static void clflush_work(struct dma_fence_work *base)
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_domain.c b/drivers/gpu/drm/i915/gem/i915_gem_domain.c
index ef3b14ae2e0d..df7502391b50 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_domain.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_domain.c
@@ -68,7 +68,7 @@ flush_write_domain(struct drm_i915_gem_object *obj, unsigned int flush_domains)
i915_vma_flush_writes(vma);
spin_unlock(&obj->vma.lock);
- i915_gem_object_flush_frontbuffer(obj, ORIGIN_CPU);
+ i915_gem_object_frontbuffer_flush(obj, ORIGIN_CPU);
break;
case I915_GEM_DOMAIN_WC:
@@ -647,7 +647,7 @@ out_unlock:
i915_gem_object_unlock(obj);
if (!err && write_domain)
- i915_gem_object_invalidate_frontbuffer(obj, ORIGIN_CPU);
+ i915_gem_object_frontbuffer_invalidate(obj, ORIGIN_CPU);
out:
i915_gem_object_put(obj);
@@ -759,7 +759,7 @@ int i915_gem_object_prepare_write(struct drm_i915_gem_object *obj,
}
out:
- i915_gem_object_invalidate_frontbuffer(obj, ORIGIN_CPU);
+ i915_gem_object_frontbuffer_invalidate(obj, ORIGIN_CPU);
obj->mm.dirty = true;
/* return with the pages pinned */
return 0;
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object.c b/drivers/gpu/drm/i915/gem/i915_gem_object.c
index 3f6f040c359d..5172d3982654 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object.c
@@ -372,12 +372,12 @@ void __i915_gem_object_pages_fini(struct drm_i915_gem_object *obj)
* and ttm_bo_cleanup_memtype_use() shouldn't be invoked for
* dma-buf, so it's safe to take the lock.
*/
- if (obj->base.import_attach)
+ if (drm_gem_is_imported(&obj->base))
i915_gem_object_lock(obj, NULL);
__i915_gem_object_put_pages(obj);
- if (obj->base.import_attach)
+ if (drm_gem_is_imported(&obj->base))
i915_gem_object_unlock(obj);
GEM_BUG_ON(i915_gem_object_has_pages(obj));
@@ -391,7 +391,7 @@ void __i915_gem_free_object(struct drm_i915_gem_object *obj)
bitmap_free(obj->bit_17);
- if (obj->base.import_attach)
+ if (drm_gem_is_imported(&obj->base))
drm_prime_gem_destroy(&obj->base, NULL);
drm_gem_free_mmap_offset(&obj->base);
@@ -474,30 +474,6 @@ static void i915_gem_free_object(struct drm_gem_object *gem_obj)
queue_work(i915->wq, &i915->mm.free_work);
}
-void __i915_gem_object_flush_frontbuffer(struct drm_i915_gem_object *obj,
- enum fb_op_origin origin)
-{
- struct i915_frontbuffer *front;
-
- front = i915_gem_object_frontbuffer_lookup(obj);
- if (front) {
- intel_frontbuffer_flush(&front->base, origin);
- i915_gem_object_frontbuffer_put(front);
- }
-}
-
-void __i915_gem_object_invalidate_frontbuffer(struct drm_i915_gem_object *obj,
- enum fb_op_origin origin)
-{
- struct i915_frontbuffer *front;
-
- front = i915_gem_object_frontbuffer_lookup(obj);
- if (front) {
- intel_frontbuffer_invalidate(&front->base, origin);
- i915_gem_object_frontbuffer_put(front);
- }
-}
-
static void
i915_gem_object_read_from_page_kmap(struct drm_i915_gem_object *obj, u64 offset, void *dst, int size)
{
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object_frontbuffer.c b/drivers/gpu/drm/i915/gem/i915_gem_object_frontbuffer.c
index adba3fa96c05..f885c4fb1326 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object_frontbuffer.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object_frontbuffer.c
@@ -1,6 +1,8 @@
// SPDX-License-Identifier: MIT
/* Copyright © 2025 Intel Corporation */
+#include <drm/intel/display_parent_interface.h>
+
#include "i915_drv.h"
#include "i915_gem_object_frontbuffer.h"
@@ -101,3 +103,70 @@ void i915_gem_object_frontbuffer_put(struct i915_frontbuffer *front)
kref_put_lock(&front->ref, frontbuffer_release,
&i915->frontbuffer_lock);
}
+
+void __i915_gem_object_frontbuffer_flush(struct drm_i915_gem_object *obj,
+ enum fb_op_origin origin)
+{
+ struct i915_frontbuffer *front;
+
+ front = i915_gem_object_frontbuffer_lookup(obj);
+ if (front) {
+ intel_frontbuffer_flush(&front->base, origin);
+ i915_gem_object_frontbuffer_put(front);
+ }
+}
+
+void __i915_gem_object_frontbuffer_invalidate(struct drm_i915_gem_object *obj,
+ enum fb_op_origin origin)
+{
+ struct i915_frontbuffer *front;
+
+ front = i915_gem_object_frontbuffer_lookup(obj);
+ if (front) {
+ intel_frontbuffer_invalidate(&front->base, origin);
+ i915_gem_object_frontbuffer_put(front);
+ }
+}
+
+static struct intel_frontbuffer *i915_frontbuffer_get(struct drm_gem_object *_obj)
+{
+ struct drm_i915_gem_object *obj = to_intel_bo(_obj);
+ struct i915_frontbuffer *front;
+
+ front = i915_gem_object_frontbuffer_get(obj);
+ if (!front)
+ return NULL;
+
+ return &front->base;
+}
+
+static void i915_frontbuffer_ref(struct intel_frontbuffer *_front)
+{
+ struct i915_frontbuffer *front =
+ container_of(_front, typeof(*front), base);
+
+ i915_gem_object_frontbuffer_ref(front);
+}
+
+static void i915_frontbuffer_put(struct intel_frontbuffer *_front)
+{
+ struct i915_frontbuffer *front =
+ container_of(_front, typeof(*front), base);
+
+ return i915_gem_object_frontbuffer_put(front);
+}
+
+static void i915_frontbuffer_flush_for_display(struct intel_frontbuffer *_front)
+{
+ struct i915_frontbuffer *front =
+ container_of(_front, typeof(*front), base);
+
+ i915_gem_object_flush_if_display(front->obj);
+}
+
+const struct intel_display_frontbuffer_interface i915_display_frontbuffer_interface = {
+ .get = i915_frontbuffer_get,
+ .ref = i915_frontbuffer_ref,
+ .put = i915_frontbuffer_put,
+ .flush_for_display = i915_frontbuffer_flush_for_display,
+};
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object_frontbuffer.h b/drivers/gpu/drm/i915/gem/i915_gem_object_frontbuffer.h
index 2133e29047c5..9c6d91f21c19 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object_frontbuffer.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object_frontbuffer.h
@@ -20,31 +20,41 @@ struct i915_frontbuffer {
struct kref ref;
};
-void __i915_gem_object_flush_frontbuffer(struct drm_i915_gem_object *obj,
+void __i915_gem_object_frontbuffer_flush(struct drm_i915_gem_object *obj,
enum fb_op_origin origin);
-void __i915_gem_object_invalidate_frontbuffer(struct drm_i915_gem_object *obj,
+void __i915_gem_object_frontbuffer_invalidate(struct drm_i915_gem_object *obj,
enum fb_op_origin origin);
static inline void
-i915_gem_object_flush_frontbuffer(struct drm_i915_gem_object *obj,
+i915_gem_object_frontbuffer_flush(struct drm_i915_gem_object *obj,
enum fb_op_origin origin)
{
if (unlikely(rcu_access_pointer(obj->frontbuffer)))
- __i915_gem_object_flush_frontbuffer(obj, origin);
+ __i915_gem_object_frontbuffer_flush(obj, origin);
}
static inline void
-i915_gem_object_invalidate_frontbuffer(struct drm_i915_gem_object *obj,
+i915_gem_object_frontbuffer_invalidate(struct drm_i915_gem_object *obj,
enum fb_op_origin origin)
{
if (unlikely(rcu_access_pointer(obj->frontbuffer)))
- __i915_gem_object_invalidate_frontbuffer(obj, origin);
+ __i915_gem_object_frontbuffer_invalidate(obj, origin);
}
struct i915_frontbuffer *i915_gem_object_frontbuffer_get(struct drm_i915_gem_object *obj);
void i915_gem_object_frontbuffer_ref(struct i915_frontbuffer *front);
void i915_gem_object_frontbuffer_put(struct i915_frontbuffer *front);
+static inline void i915_gem_object_frontbuffer_track(struct i915_frontbuffer *_old,
+ struct i915_frontbuffer *_new,
+ unsigned int frontbuffer_bits)
+{
+ struct intel_frontbuffer *old = _old ? &_old->base : NULL;
+ struct intel_frontbuffer *new = _new ? &_new->base : NULL;
+
+ intel_frontbuffer_track(old, new, frontbuffer_bits);
+}
+
/**
* i915_gem_object_frontbuffer_lookup - Look up the object's frontbuffer
* @obj: The object whose frontbuffer to look up.
@@ -81,4 +91,6 @@ i915_gem_object_frontbuffer_lookup(const struct drm_i915_gem_object *obj)
return front;
}
+extern const struct intel_display_frontbuffer_interface i915_display_frontbuffer_interface;
+
#endif
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_phys.c b/drivers/gpu/drm/i915/gem/i915_gem_phys.c
index ce2780ef97ef..e375afbf458e 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_phys.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_phys.c
@@ -155,7 +155,7 @@ int i915_gem_object_pwrite_phys(struct drm_i915_gem_object *obj,
* We manually control the domain here and pretend that it
* remains coherent i.e. in the GTT domain, like shmem_pwrite.
*/
- i915_gem_object_invalidate_frontbuffer(obj, ORIGIN_CPU);
+ i915_gem_object_frontbuffer_invalidate(obj, ORIGIN_CPU);
if (copy_from_user(vaddr, user_data, args->size))
return -EFAULT;
@@ -163,7 +163,7 @@ int i915_gem_object_pwrite_phys(struct drm_i915_gem_object *obj,
drm_clflush_virt_range(vaddr, args->size);
intel_gt_chipset_flush(to_gt(i915));
- i915_gem_object_flush_frontbuffer(obj, ORIGIN_CPU);
+ i915_gem_object_frontbuffer_flush(obj, ORIGIN_CPU);
return 0;
}
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_shmem.c b/drivers/gpu/drm/i915/gem/i915_gem_shmem.c
index c6c64ba29bc4..720a9ad39aa2 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_shmem.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_shmem.c
@@ -153,8 +153,12 @@ int shmem_sg_alloc_table(struct drm_i915_private *i915, struct sg_table *st,
}
} while (1);
- nr_pages = min_t(unsigned long,
- folio_nr_pages(folio), page_count - i);
+ nr_pages = min_array(((unsigned long[]) {
+ folio_nr_pages(folio),
+ page_count - i,
+ max_segment / PAGE_SIZE,
+ }), 3);
+
if (!i ||
sg->length >= max_segment ||
folio_pfn(folio) != next_pfn) {
@@ -164,7 +168,9 @@ int shmem_sg_alloc_table(struct drm_i915_private *i915, struct sg_table *st,
st->nents++;
sg_set_folio(sg, folio, nr_pages * PAGE_SIZE, 0);
} else {
- /* XXX: could overflow? */
+ nr_pages = min_t(unsigned long, nr_pages,
+ (max_segment - sg->length) / PAGE_SIZE);
+
sg->length += nr_pages * PAGE_SIZE;
}
next_pfn = folio_pfn(folio) + nr_pages;
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_ttm_move.c b/drivers/gpu/drm/i915/gem/i915_gem_ttm_move.c
index 3a7e202ae87d..56489cc127d6 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_ttm_move.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_ttm_move.c
@@ -408,7 +408,7 @@ static void __memcpy_cb(struct dma_fence *fence, struct dma_fence_cb *cb)
if (unlikely(fence->error || I915_SELFTEST_ONLY(fail_gpu_migration))) {
INIT_WORK(&copy_work->work, __memcpy_work);
- queue_work(system_unbound_wq, &copy_work->work);
+ queue_work(system_dfl_wq, &copy_work->work);
} else {
init_irq_work(&copy_work->irq_work, __memcpy_irq_work);
irq_work_queue(&copy_work->irq_work);
diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c
index 17829ad57099..9d454d0b46f2 100644
--- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c
+++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c
@@ -1159,6 +1159,7 @@ static int __igt_mmap_migrate(struct intel_memory_region **placements,
struct drm_i915_gem_object *obj;
struct i915_request *rq = NULL;
struct vm_area_struct *area;
+ struct file *mock_file;
unsigned long addr;
LIST_HEAD(objects);
u64 offset;
@@ -1178,16 +1179,25 @@ static int __igt_mmap_migrate(struct intel_memory_region **placements,
goto out_put;
/*
- * This will eventually create a GEM context, due to opening dummy drm
- * file, which needs a tiny amount of mappable device memory for the top
- * level paging structures(and perhaps scratch), so make sure we
- * allocate early, to avoid tears.
+ * Pretend to open("/dev/dri/card0"), which will eventually create a GEM
+ * context along with multiple GEM objects (for paging structures and
+ * scratch) that are placed in the mappable portion of GPU memory.
+ * Calling fput() on the file places objects' cleanup routines in delayed
+ * workqueues, which execute after an unspecified amount of time.
+ * Keep the file open until migration and page fault checks are done to
+ * make sure object cleanup is not executed after igt_fill_mappable()
+ * finishes and before migration is attempted - that would leave a gap
+ * large enough for the migration to succeed, when we'd expect it to fail.
*/
- addr = igt_mmap_offset(i915, offset, obj->base.size,
- PROT_WRITE, MAP_SHARED);
+ mock_file = mock_drm_getfile(i915->drm.primary, O_RDWR);
+ if (IS_ERR(mock_file))
+ return PTR_ERR(mock_file);
+
+ addr = igt_mmap_offset_with_file(i915, offset, obj->base.size,
+ PROT_WRITE, MAP_SHARED, mock_file);
if (IS_ERR_VALUE(addr)) {
err = addr;
- goto out_put;
+ goto out_fput;
}
mmap_read_lock(current->mm);
@@ -1294,6 +1304,9 @@ static int __igt_mmap_migrate(struct intel_memory_region **placements,
out_addr:
vm_munmap(addr, obj->base.size);
+out_fput:
+ fput(mock_file);
+
out_put:
i915_gem_object_put(obj);
igt_close_objects(i915, &objects);
diff --git a/drivers/gpu/drm/i915/gt/gen6_ppgtt.c b/drivers/gpu/drm/i915/gt/gen6_ppgtt.c
index e8fab45759c3..438cd4724ac4 100644
--- a/drivers/gpu/drm/i915/gt/gen6_ppgtt.c
+++ b/drivers/gpu/drm/i915/gt/gen6_ppgtt.c
@@ -67,7 +67,7 @@ void gen6_ppgtt_enable(struct intel_gt *gt)
if (HAS_PPGTT(uncore->i915)) /* may be disabled for VT-d */
intel_uncore_write(uncore,
GFX_MODE,
- _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE));
+ REG_MASKED_FIELD_ENABLE(GFX_PPGTT_ENABLE));
}
/* PPGTT support for Sandybdrige/Gen6 and later */
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_cs.c b/drivers/gpu/drm/i915/gt/intel_engine_cs.c
index d37966ec7a92..c0fd349a4600 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_cs.c
+++ b/drivers/gpu/drm/i915/gt/intel_engine_cs.c
@@ -1233,7 +1233,7 @@ static int intel_engine_init_tlb_invalidation(struct intel_engine_cs *engine)
engine->class == VIDEO_ENHANCEMENT_CLASS ||
engine->class == COMPUTE_CLASS ||
engine->class == OTHER_CLASS))
- engine->tlb_inv.request = _MASKED_BIT_ENABLE(val);
+ engine->tlb_inv.request = REG_MASKED_FIELD_ENABLE(val);
else
engine->tlb_inv.request = val;
@@ -1628,7 +1628,7 @@ static int __intel_engine_stop_cs(struct intel_engine_cs *engine,
const i915_reg_t mode = RING_MI_MODE(engine->mmio_base);
int err;
- intel_uncore_write_fw(uncore, mode, _MASKED_BIT_ENABLE(STOP_RING));
+ intel_uncore_write_fw(uncore, mode, REG_MASKED_FIELD_ENABLE(STOP_RING));
/*
* Wa_22011802037: Prior to doing a reset, ensure CS is
@@ -1636,7 +1636,7 @@ static int __intel_engine_stop_cs(struct intel_engine_cs *engine,
*/
if (intel_engine_reset_needs_wa_22011802037(engine->gt))
intel_uncore_write_fw(uncore, RING_MODE_GEN7(engine->mmio_base),
- _MASKED_BIT_ENABLE(GEN12_GFX_PREFETCH_DISABLE));
+ REG_MASKED_FIELD_ENABLE(GEN12_GFX_PREFETCH_DISABLE));
err = __intel_wait_for_register_fw(engine->uncore, mode,
MODE_IDLE, MODE_IDLE,
@@ -1692,7 +1692,7 @@ void intel_engine_cancel_stop_cs(struct intel_engine_cs *engine)
{
ENGINE_TRACE(engine, "\n");
- ENGINE_WRITE_FW(engine, RING_MI_MODE, _MASKED_BIT_DISABLE(STOP_RING));
+ ENGINE_WRITE_FW(engine, RING_MI_MODE, REG_MASKED_FIELD_DISABLE(STOP_RING));
}
static u32 __cs_pending_mi_force_wakes(struct intel_engine_cs *engine)
@@ -1967,7 +1967,8 @@ void intel_engines_reset_default_submission(struct intel_gt *gt)
if (engine->sanitize)
engine->sanitize(engine);
- engine->set_default_submission(engine);
+ if (engine->set_default_submission)
+ engine->set_default_submission(engine);
}
}
@@ -2551,7 +2552,7 @@ void xehp_enable_ccs_engines(struct intel_engine_cs *engine)
return;
intel_uncore_write(engine->uncore, GEN12_RCU_MODE,
- _MASKED_BIT_ENABLE(GEN12_RCU_MODE_CCS_ENABLE));
+ REG_MASKED_FIELD_ENABLE(GEN12_RCU_MODE_CCS_ENABLE));
}
#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_pm.c b/drivers/gpu/drm/i915/gt/intel_engine_pm.c
index fb7bff27b45a..26196a57041e 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_pm.c
+++ b/drivers/gpu/drm/i915/gt/intel_engine_pm.c
@@ -24,7 +24,7 @@ static void intel_gsc_idle_msg_enable(struct intel_engine_cs *engine)
if (MEDIA_VER(i915) >= 13 && engine->id == GSC0) {
intel_uncore_write(engine->gt->uncore,
RC_PSMI_CTRL_GSCCS,
- _MASKED_BIT_DISABLE(IDLE_MSG_DISABLE));
+ REG_MASKED_FIELD_DISABLE(IDLE_MSG_DISABLE));
/* hysteresis 0xA=5us as recommended in spec*/
intel_uncore_write(engine->gt->uncore,
PWRCTX_MAXCNT_GSCCS,
diff --git a/drivers/gpu/drm/i915/gt/intel_execlists_submission.c b/drivers/gpu/drm/i915/gt/intel_execlists_submission.c
index cafe0b8e6bdd..1359fc9cb88e 100644
--- a/drivers/gpu/drm/i915/gt/intel_execlists_submission.c
+++ b/drivers/gpu/drm/i915/gt/intel_execlists_submission.c
@@ -2934,12 +2934,12 @@ static void enable_execlists(struct intel_engine_cs *engine)
intel_engine_set_hwsp_writemask(engine, ~0u); /* HWSTAM */
if (GRAPHICS_VER(engine->i915) >= 11)
- mode = _MASKED_BIT_ENABLE(GEN11_GFX_DISABLE_LEGACY_MODE);
+ mode = REG_MASKED_FIELD_ENABLE(GEN11_GFX_DISABLE_LEGACY_MODE);
else
- mode = _MASKED_BIT_ENABLE(GFX_RUN_LIST_ENABLE);
+ mode = REG_MASKED_FIELD_ENABLE(GFX_RUN_LIST_ENABLE);
ENGINE_WRITE_FW(engine, RING_MODE_GEN7, mode);
- ENGINE_WRITE_FW(engine, RING_MI_MODE, _MASKED_BIT_DISABLE(STOP_RING));
+ ENGINE_WRITE_FW(engine, RING_MI_MODE, REG_MASKED_FIELD_DISABLE(STOP_RING));
ENGINE_WRITE_FW(engine,
RING_HWS_PGA,
diff --git a/drivers/gpu/drm/i915/gt/intel_ggtt_fencing.c b/drivers/gpu/drm/i915/gt/intel_ggtt_fencing.c
index db995dce914a..ac9aede82320 100644
--- a/drivers/gpu/drm/i915/gt/intel_ggtt_fencing.c
+++ b/drivers/gpu/drm/i915/gt/intel_ggtt_fencing.c
@@ -6,6 +6,7 @@
#include <linux/highmem.h>
#include <drm/drm_print.h>
+#include <drm/intel/intel_gmd_misc_regs.h>
#include "display/intel_display.h"
#include "i915_drv.h"
@@ -915,15 +916,15 @@ void intel_gt_init_swizzling(struct intel_gt *gt)
if (GRAPHICS_VER(i915) == 6)
intel_uncore_write(uncore,
ARB_MODE,
- _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_SNB));
+ REG_MASKED_FIELD_ENABLE(ARB_MODE_SWIZZLE_SNB));
else if (GRAPHICS_VER(i915) == 7)
intel_uncore_write(uncore,
ARB_MODE,
- _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_IVB));
+ REG_MASKED_FIELD_ENABLE(ARB_MODE_SWIZZLE_IVB));
else if (GRAPHICS_VER(i915) == 8)
intel_uncore_write(uncore,
GAMTARBMODE,
- _MASKED_BIT_ENABLE(ARB_MODE_SWIZZLE_BDW));
+ REG_MASKED_FIELD_ENABLE(ARB_MODE_SWIZZLE_BDW));
else
MISSING_CASE(GRAPHICS_VER(i915));
}
diff --git a/drivers/gpu/drm/i915/gt/intel_gt.c b/drivers/gpu/drm/i915/gt/intel_gt.c
index ac527d878820..d76121e117e1 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt.c
+++ b/drivers/gpu/drm/i915/gt/intel_gt.c
@@ -5,6 +5,7 @@
#include <drm/drm_managed.h>
#include <drm/intel/intel-gtt.h>
+#include <drm/intel/intel_gmd_interrupt_regs.h>
#include "gem/i915_gem_internal.h"
#include "gem/i915_gem_lmem.h"
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_irq.c b/drivers/gpu/drm/i915/gt/intel_gt_irq.c
index 75e802e10be2..d85c849c0081 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_irq.c
+++ b/drivers/gpu/drm/i915/gt/intel_gt_irq.c
@@ -5,6 +5,8 @@
#include <linux/sched/clock.h>
+#include <drm/intel/intel_gmd_interrupt_regs.h>
+
#include "i915_drv.h"
#include "i915_irq.h"
#include "i915_reg.h"
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_pm_debugfs.c b/drivers/gpu/drm/i915/gt/intel_gt_pm_debugfs.c
index c0aff4b3cbba..babaf16e72f2 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_pm_debugfs.c
+++ b/drivers/gpu/drm/i915/gt/intel_gt_pm_debugfs.c
@@ -8,6 +8,7 @@
#include <linux/string_helpers.h>
#include <drm/drm_print.h>
+#include <drm/intel/intel_pcode_regs.h>
#include "i915_drv.h"
#include "i915_reg.h"
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_regs.h b/drivers/gpu/drm/i915/gt/intel_gt_regs.h
index 7421ed18d8d1..3ba9b2206b79 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_regs.h
+++ b/drivers/gpu/drm/i915/gt/intel_gt_regs.h
@@ -61,6 +61,9 @@
#define GMD_ID_GRAPHICS _MMIO(0xd8c)
#define GMD_ID_MEDIA _MMIO(MTL_MEDIA_GSI_BASE + 0xd8c)
+#define GMD_ID_ARCH_MASK REG_GENMASK(31, 22)
+#define GMD_ID_RELEASE_MASK REG_GENMASK(21, 14)
+#define GMD_ID_STEP REG_GENMASK(5, 0)
#define MCFG_MCR_SELECTOR _MMIO(0xfd0)
#define MTL_STEER_SEMAPHORE _MMIO(0xfd0)
@@ -318,11 +321,9 @@
#define _RING_FAULT_REG_VCS 0x4194
#define _RING_FAULT_REG_BCS 0x4294
#define _RING_FAULT_REG_VECS 0x4394
-#define RING_FAULT_REG(engine) _MMIO(_PICK((engine)->class, \
- _RING_FAULT_REG_RCS, \
- _RING_FAULT_REG_VCS, \
- _RING_FAULT_REG_VECS, \
- _RING_FAULT_REG_BCS))
+#define RING_FAULT_REG(engine) _MMIO(_PICK_EVEN((engine)->class, \
+ _RING_FAULT_REG_RCS, \
+ _RING_FAULT_REG_VCS))
#define RING_FAULT_VADDR_MASK REG_GENMASK(31, 12) /* pre-bdw */
#define RING_FAULT_ENGINE_ID_MASK REG_GENMASK(16, 12) /* bdw+ */
#define RING_FAULT_GTTSEL_MASK REG_BIT(11) /* pre-bdw */
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_sysfs_pm.c b/drivers/gpu/drm/i915/gt/intel_gt_sysfs_pm.c
index 1154cd2b7c34..a48601395dce 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_sysfs_pm.c
+++ b/drivers/gpu/drm/i915/gt/intel_gt_sysfs_pm.c
@@ -7,6 +7,8 @@
#include <linux/sysfs.h>
#include <linux/printk.h>
+#include <drm/intel/intel_pcode_regs.h>
+
#include "i915_drv.h"
#include "i915_reg.h"
#include "i915_sysfs.h"
diff --git a/drivers/gpu/drm/i915/gt/intel_llc.c b/drivers/gpu/drm/i915/gt/intel_llc.c
index 1d19c073ba2e..bcd707e3d436 100644
--- a/drivers/gpu/drm/i915/gt/intel_llc.c
+++ b/drivers/gpu/drm/i915/gt/intel_llc.c
@@ -6,6 +6,8 @@
#include <asm/tsc.h>
#include <linux/cpufreq.h>
+#include <drm/intel/intel_pcode_regs.h>
+
#include "i915_drv.h"
#include "i915_reg.h"
#include "intel_gt.h"
diff --git a/drivers/gpu/drm/i915/gt/intel_lrc.c b/drivers/gpu/drm/i915/gt/intel_lrc.c
index d36e543e98df..147d22907960 100644
--- a/drivers/gpu/drm/i915/gt/intel_lrc.c
+++ b/drivers/gpu/drm/i915/gt/intel_lrc.c
@@ -846,16 +846,16 @@ static void init_common_regs(u32 * const regs,
u32 ctl;
int loc;
- ctl = _MASKED_BIT_ENABLE(CTX_CTRL_INHIBIT_SYN_CTX_SWITCH);
- ctl |= _MASKED_BIT_DISABLE(CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT);
+ ctl = REG_MASKED_FIELD_ENABLE(CTX_CTRL_INHIBIT_SYN_CTX_SWITCH);
+ ctl |= REG_MASKED_FIELD_DISABLE(CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT);
if (inhibit)
ctl |= CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT;
if (GRAPHICS_VER(engine->i915) < 11)
- ctl |= _MASKED_BIT_DISABLE(CTX_CTRL_ENGINE_CTX_SAVE_INHIBIT |
- CTX_CTRL_RS_CTX_ENABLE);
+ ctl |= REG_MASKED_FIELD_DISABLE(CTX_CTRL_ENGINE_CTX_SAVE_INHIBIT |
+ CTX_CTRL_RS_CTX_ENABLE);
/* Wa_14019159160 - Case 2.*/
if (ctx_needs_runalone(ce))
- ctl |= _MASKED_BIT_ENABLE(GEN12_CTX_CTRL_RUNALONE_MODE);
+ ctl |= REG_MASKED_FIELD_ENABLE(GEN12_CTX_CTRL_RUNALONE_MODE);
regs[CTX_CONTEXT_CONTROL] = ctl;
regs[CTX_TIMESTAMP] = ce->stats.runtime.last;
@@ -1344,7 +1344,7 @@ gen12_invalidate_state_cache(u32 *cs)
{
*cs++ = MI_LOAD_REGISTER_IMM(1);
*cs++ = i915_mmio_reg_offset(GEN12_CS_DEBUG_MODE2);
- *cs++ = _MASKED_BIT_ENABLE(INSTRUCTION_STATE_CACHE_INVALIDATE);
+ *cs++ = REG_MASKED_FIELD_ENABLE(INSTRUCTION_STATE_CACHE_INVALIDATE);
return cs;
}
@@ -1736,22 +1736,19 @@ static u32 *gen9_init_indirectctx_bb(struct intel_engine_cs *engine, u32 *batch)
/* WaDisableGatherAtSetShaderCommonSlice:skl,bxt,kbl,glk */
{
COMMON_SLICE_CHICKEN2,
- __MASKED_FIELD(GEN9_DISABLE_GATHER_AT_SET_SHADER_COMMON_SLICE,
- 0),
+ REG_MASKED_FIELD_DISABLE(GEN9_DISABLE_GATHER_AT_SET_SHADER_COMMON_SLICE),
},
/* BSpec: 11391 */
{
FF_SLICE_CHICKEN,
- __MASKED_FIELD(FF_SLICE_CHICKEN_CL_PROVOKING_VERTEX_FIX,
- FF_SLICE_CHICKEN_CL_PROVOKING_VERTEX_FIX),
+ REG_MASKED_FIELD_ENABLE(FF_SLICE_CHICKEN_CL_PROVOKING_VERTEX_FIX),
},
/* BSpec: 11299 */
{
_3D_CHICKEN3,
- __MASKED_FIELD(_3D_CHICKEN_SF_PROVOKING_VERTEX_FIX,
- _3D_CHICKEN_SF_PROVOKING_VERTEX_FIX),
+ REG_MASKED_FIELD_ENABLE(_3D_CHICKEN_SF_PROVOKING_VERTEX_FIX),
}
};
diff --git a/drivers/gpu/drm/i915/gt/intel_rc6.c b/drivers/gpu/drm/i915/gt/intel_rc6.c
index 286d49ecc449..e91e5cdca26c 100644
--- a/drivers/gpu/drm/i915/gt/intel_rc6.c
+++ b/drivers/gpu/drm/i915/gt/intel_rc6.c
@@ -7,6 +7,8 @@
#include <linux/string_helpers.h>
#include <drm/drm_print.h>
+#include <drm/intel/intel_pcode_regs.h>
+#include <drm/intel/intel_gmd_interrupt_regs.h>
#include "display/vlv_clock.h"
#include "gem/i915_gem_region.h"
@@ -376,9 +378,9 @@ static void chv_rc6_enable(struct intel_rc6 *rc6)
/* Allows RC6 residency counter to work */
intel_uncore_write_fw(uncore, VLV_COUNTER_CONTROL,
- _MASKED_BIT_ENABLE(VLV_COUNT_RANGE_HIGH |
- VLV_MEDIA_RC6_COUNT_EN |
- VLV_RENDER_RC6_COUNT_EN));
+ REG_MASKED_FIELD_ENABLE(VLV_COUNT_RANGE_HIGH |
+ VLV_MEDIA_RC6_COUNT_EN |
+ VLV_RENDER_RC6_COUNT_EN));
/* 3: Enable RC6 */
rc6->ctl_enable = GEN7_RC_CTL_TO_MODE;
@@ -401,11 +403,11 @@ static void vlv_rc6_enable(struct intel_rc6 *rc6)
/* Allows RC6 residency counter to work */
intel_uncore_write_fw(uncore, VLV_COUNTER_CONTROL,
- _MASKED_BIT_ENABLE(VLV_COUNT_RANGE_HIGH |
- VLV_MEDIA_RC0_COUNT_EN |
- VLV_RENDER_RC0_COUNT_EN |
- VLV_MEDIA_RC6_COUNT_EN |
- VLV_RENDER_RC6_COUNT_EN));
+ REG_MASKED_FIELD_ENABLE(VLV_COUNT_RANGE_HIGH |
+ VLV_MEDIA_RC0_COUNT_EN |
+ VLV_RENDER_RC0_COUNT_EN |
+ VLV_MEDIA_RC6_COUNT_EN |
+ VLV_RENDER_RC6_COUNT_EN));
rc6->ctl_enable =
GEN7_RC_CTL_TO_MODE | VLV_RC_CTL_CTX_RST_PARALLEL;
@@ -761,17 +763,17 @@ static u64 vlv_residency_raw(struct intel_uncore *uncore, const i915_reg_t reg)
* set the high bit to be safe.
*/
intel_uncore_write_fw(uncore, VLV_COUNTER_CONTROL,
- _MASKED_BIT_ENABLE(VLV_COUNT_RANGE_HIGH));
+ REG_MASKED_FIELD_ENABLE(VLV_COUNT_RANGE_HIGH));
upper = intel_uncore_read_fw(uncore, reg);
do {
tmp = upper;
intel_uncore_write_fw(uncore, VLV_COUNTER_CONTROL,
- _MASKED_BIT_DISABLE(VLV_COUNT_RANGE_HIGH));
+ REG_MASKED_FIELD_DISABLE(VLV_COUNT_RANGE_HIGH));
lower = intel_uncore_read_fw(uncore, reg);
intel_uncore_write_fw(uncore, VLV_COUNTER_CONTROL,
- _MASKED_BIT_ENABLE(VLV_COUNT_RANGE_HIGH));
+ REG_MASKED_FIELD_ENABLE(VLV_COUNT_RANGE_HIGH));
upper = intel_uncore_read_fw(uncore, reg);
} while (upper != tmp && --loop);
diff --git a/drivers/gpu/drm/i915/gt/intel_reset.c b/drivers/gpu/drm/i915/gt/intel_reset.c
index 41b5036dc538..984d0056c01c 100644
--- a/drivers/gpu/drm/i915/gt/intel_reset.c
+++ b/drivers/gpu/drm/i915/gt/intel_reset.c
@@ -586,7 +586,7 @@ static int gen8_engine_reset_prepare(struct intel_engine_cs *engine)
return 0;
}
- intel_uncore_write_fw(uncore, reg, _MASKED_BIT_ENABLE(request));
+ intel_uncore_write_fw(uncore, reg, REG_MASKED_FIELD_ENABLE(request));
ret = __intel_wait_for_register_fw(uncore, reg, mask, ack,
700, 0, NULL);
if (ret)
@@ -602,7 +602,7 @@ static void gen8_engine_reset_cancel(struct intel_engine_cs *engine)
{
intel_uncore_write_fw(engine->uncore,
RING_RESET_CTL(engine->mmio_base),
- _MASKED_BIT_DISABLE(RESET_CTL_REQUEST_RESET));
+ REG_MASKED_FIELD_DISABLE(RESET_CTL_REQUEST_RESET));
}
static int gen8_reset_engines(struct intel_gt *gt,
diff --git a/drivers/gpu/drm/i915/gt/intel_ring_submission.c b/drivers/gpu/drm/i915/gt/intel_ring_submission.c
index 8314a4b0505e..064e7cce412f 100644
--- a/drivers/gpu/drm/i915/gt/intel_ring_submission.c
+++ b/drivers/gpu/drm/i915/gt/intel_ring_submission.c
@@ -4,6 +4,8 @@
*/
#include <drm/drm_cache.h>
+#include <drm/intel/intel_gmd_interrupt_regs.h>
+#include <drm/intel/intel_gmd_misc_regs.h>
#include "gem/i915_gem_internal.h"
@@ -126,8 +128,7 @@ static void flush_cs_tlb(struct intel_engine_cs *engine)
engine->name);
ENGINE_WRITE_FW(engine, RING_INSTPM,
- _MASKED_BIT_ENABLE(INSTPM_TLB_INVALIDATE |
- INSTPM_SYNC_FLUSH));
+ REG_MASKED_FIELD_ENABLE(INSTPM_TLB_INVALIDATE | INSTPM_SYNC_FLUSH));
if (__intel_wait_for_register_fw(engine->uncore,
RING_INSTPM(engine->mmio_base),
INSTPM_SYNC_FLUSH, 0,
@@ -170,7 +171,7 @@ static void set_pp_dir(struct intel_engine_cs *engine)
if (GRAPHICS_VER(engine->i915) >= 7) {
ENGINE_WRITE_FW(engine,
RING_MODE_GEN7,
- _MASKED_BIT_ENABLE(GFX_PPGTT_ENABLE));
+ REG_MASKED_FIELD_ENABLE(GFX_PPGTT_ENABLE));
}
}
@@ -274,7 +275,7 @@ static int xcs_resume(struct intel_engine_cs *engine)
if (GRAPHICS_VER(engine->i915) > 2) {
ENGINE_WRITE_FW(engine,
- RING_MI_MODE, _MASKED_BIT_DISABLE(STOP_RING));
+ RING_MI_MODE, REG_MASKED_FIELD_DISABLE(STOP_RING));
ENGINE_POSTING_READ(engine, RING_MI_MODE);
}
@@ -717,7 +718,7 @@ static int load_pd_dir(struct i915_request *rq,
*cs++ = MI_LOAD_REGISTER_IMM(1);
*cs++ = i915_mmio_reg_offset(RING_INSTPM(engine->mmio_base));
- *cs++ = _MASKED_BIT_ENABLE(INSTPM_TLB_INVALIDATE);
+ *cs++ = REG_MASKED_FIELD_ENABLE(INSTPM_TLB_INVALIDATE);
intel_ring_advance(rq, cs);
@@ -766,8 +767,7 @@ static int mi_set_context(struct i915_request *rq,
*cs++ = i915_mmio_reg_offset(
RING_PSMI_CTL(signaller->mmio_base));
- *cs++ = _MASKED_BIT_ENABLE(
- GEN6_PSMI_SLEEP_MSG_DISABLE);
+ *cs++ = REG_MASKED_FIELD_ENABLE(GEN6_PSMI_SLEEP_MSG_DISABLE);
}
}
} else if (GRAPHICS_VER(i915) == 5) {
@@ -820,8 +820,7 @@ static int mi_set_context(struct i915_request *rq,
last_reg = RING_PSMI_CTL(signaller->mmio_base);
*cs++ = i915_mmio_reg_offset(last_reg);
- *cs++ = _MASKED_BIT_DISABLE(
- GEN6_PSMI_SLEEP_MSG_DISABLE);
+ *cs++ = REG_MASKED_FIELD_DISABLE(GEN6_PSMI_SLEEP_MSG_DISABLE);
}
/* Insert a delay before the next switch! */
@@ -1053,7 +1052,7 @@ static void gen6_bsd_submit_request(struct i915_request *request)
* will then assume that it is busy and bring it out of rc6.
*/
intel_uncore_write_fw(uncore, RING_PSMI_CTL(GEN6_BSD_RING_BASE),
- _MASKED_BIT_ENABLE(GEN6_PSMI_SLEEP_MSG_DISABLE));
+ REG_MASKED_FIELD_ENABLE(GEN6_PSMI_SLEEP_MSG_DISABLE));
/* Clear the context id. Here be magic! */
intel_uncore_write64_fw(uncore, GEN6_BSD_RNCID, 0x0);
@@ -1074,7 +1073,7 @@ static void gen6_bsd_submit_request(struct i915_request *request)
* and so let it sleep to conserve power when idle.
*/
intel_uncore_write_fw(uncore, RING_PSMI_CTL(GEN6_BSD_RING_BASE),
- _MASKED_BIT_DISABLE(GEN6_PSMI_SLEEP_MSG_DISABLE));
+ REG_MASKED_FIELD_DISABLE(GEN6_PSMI_SLEEP_MSG_DISABLE));
intel_uncore_forcewake_put(uncore, FORCEWAKE_ALL);
}
diff --git a/drivers/gpu/drm/i915/gt/intel_rps.c b/drivers/gpu/drm/i915/gt/intel_rps.c
index 90b7eee78f1f..844f2716a386 100644
--- a/drivers/gpu/drm/i915/gt/intel_rps.c
+++ b/drivers/gpu/drm/i915/gt/intel_rps.c
@@ -7,6 +7,7 @@
#include <drm/intel/i915_drm.h>
#include <drm/intel/display_parent_interface.h>
+#include <drm/intel/intel_pcode_regs.h>
#include "display/intel_display_rps.h"
#include "display/vlv_clock.h"
diff --git a/drivers/gpu/drm/i915/gt/intel_workarounds.c b/drivers/gpu/drm/i915/gt/intel_workarounds.c
index b0ee3d0ae681..24ea5d8d529c 100644
--- a/drivers/gpu/drm/i915/gt/intel_workarounds.c
+++ b/drivers/gpu/drm/i915/gt/intel_workarounds.c
@@ -3,6 +3,8 @@
* Copyright © 2014-2018 Intel Corporation
*/
+#include <drm/intel/intel_gmd_misc_regs.h>
+
#include "i915_drv.h"
#include "i915_reg.h"
#include "i915_mmio_range.h"
@@ -298,39 +300,39 @@ wa_mcr_write_clr(struct i915_wa_list *wal, i915_mcr_reg_t reg, u32 clr)
static void
wa_masked_en(struct i915_wa_list *wal, i915_reg_t reg, u32 val)
{
- wa_add(wal, reg, 0, _MASKED_BIT_ENABLE(val), val, true);
+ wa_add(wal, reg, 0, REG_MASKED_FIELD_ENABLE(val), val, true);
}
static void
wa_mcr_masked_en(struct i915_wa_list *wal, i915_mcr_reg_t reg, u32 val)
{
- wa_mcr_add(wal, reg, 0, _MASKED_BIT_ENABLE(val), val, true);
+ wa_mcr_add(wal, reg, 0, REG_MASKED_FIELD_ENABLE(val), val, true);
}
static void
wa_masked_dis(struct i915_wa_list *wal, i915_reg_t reg, u32 val)
{
- wa_add(wal, reg, 0, _MASKED_BIT_DISABLE(val), val, true);
+ wa_add(wal, reg, 0, REG_MASKED_FIELD_DISABLE(val), val, true);
}
static void
wa_mcr_masked_dis(struct i915_wa_list *wal, i915_mcr_reg_t reg, u32 val)
{
- wa_mcr_add(wal, reg, 0, _MASKED_BIT_DISABLE(val), val, true);
+ wa_mcr_add(wal, reg, 0, REG_MASKED_FIELD_DISABLE(val), val, true);
}
static void
wa_masked_field_set(struct i915_wa_list *wal, i915_reg_t reg,
u32 mask, u32 val)
{
- wa_add(wal, reg, 0, _MASKED_FIELD(mask, val), mask, true);
+ wa_add(wal, reg, 0, REG_MASKED_FIELD(mask, val), mask, true);
}
static void
wa_mcr_masked_field_set(struct i915_wa_list *wal, i915_mcr_reg_t reg,
u32 mask, u32 val)
{
- wa_mcr_add(wal, reg, 0, _MASKED_FIELD(mask, val), mask, true);
+ wa_mcr_add(wal, reg, 0, REG_MASKED_FIELD(mask, val), mask, true);
}
static void gen6_ctx_workarounds_init(struct intel_engine_cs *engine,
@@ -664,7 +666,7 @@ static void icl_ctx_workarounds_init(struct intel_engine_cs *engine,
/* WaEnableFloatBlendOptimization:icl */
wa_mcr_add(wal, GEN10_CACHE_MODE_SS, 0,
- _MASKED_BIT_ENABLE(FLOAT_BLEND_OPTIMIZATION_ENABLE),
+ REG_MASKED_FIELD_ENABLE(FLOAT_BLEND_OPTIMIZATION_ENABLE),
0 /* write-only, so skip validation */,
true);
@@ -1129,7 +1131,7 @@ hsw_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
wa_add(wal,
HSW_ROW_CHICKEN3, 0,
- _MASKED_BIT_ENABLE(HSW_ROW_CHICKEN3_L3_GLOBAL_ATOMICS_DISABLE),
+ REG_MASKED_FIELD_ENABLE(HSW_ROW_CHICKEN3_L3_GLOBAL_ATOMICS_DISABLE),
0 /* XXX does this reg exist? */, true);
/* WaVSRefCountFullforceMissDisable:hsw */
@@ -2270,7 +2272,7 @@ rcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
IS_DG2(i915)) {
/* Wa_14015150844 */
wa_mcr_add(wal, XEHP_HDC_CHICKEN0, 0,
- _MASKED_BIT_ENABLE(DIS_ATOMIC_CHAINING_TYPED_WRITES),
+ REG_MASKED_FIELD_ENABLE(DIS_ATOMIC_CHAINING_TYPED_WRITES),
0, true);
}
@@ -2661,7 +2663,7 @@ rcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
if (IS_GRAPHICS_VER(i915, 4, 6))
/* WaTimedSingleVertexDispatch:cl,bw,ctg,elk,ilk,snb */
wa_add(wal, RING_MI_MODE(RENDER_RING_BASE),
- 0, _MASKED_BIT_ENABLE(VS_TIMER_DISPATCH),
+ 0, REG_MASKED_FIELD_ENABLE(VS_TIMER_DISPATCH),
/* XXX bit doesn't stick on Broadwater */
IS_I965G(i915) ? 0 : VS_TIMER_DISPATCH, true);
@@ -2677,7 +2679,7 @@ rcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
* enabled.
*/
wa_add(wal, ECOSKPD(RENDER_RING_BASE),
- 0, _MASKED_BIT_ENABLE(ECO_CONSTANT_BUFFER_SR_DISABLE),
+ 0, REG_MASKED_FIELD_ENABLE(ECO_CONSTANT_BUFFER_SR_DISABLE),
0 /* XXX bit doesn't stick on Broadwater */,
true);
}
@@ -2877,7 +2879,7 @@ general_render_compute_wa_init(struct intel_engine_cs *engine, struct i915_wa_li
* we need to explicitly skip the readback.
*/
wa_mcr_add(wal, GEN10_CACHE_MODE_SS, 0,
- _MASKED_BIT_ENABLE(ENABLE_PREFETCH_INTO_IC),
+ REG_MASKED_FIELD_ENABLE(ENABLE_PREFETCH_INTO_IC),
0 /* write-only, so skip validation */,
true);
}
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc.c b/drivers/gpu/drm/i915/gt/uc/intel_guc.c
index 52ec4421a211..1c2764440323 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc.c
@@ -624,7 +624,7 @@ int intel_guc_crash_process_msg(struct intel_guc *guc, u32 action)
else
guc_err(guc, "Unknown crash notification: 0x%04X\n", action);
- queue_work(system_unbound_wq, &guc->dead_guc_worker);
+ queue_work(system_dfl_wq, &guc->dead_guc_worker);
return 0;
}
@@ -646,7 +646,7 @@ int intel_guc_to_host_process_recv_msg(struct intel_guc *guc,
guc_err(guc, "Received early exception notification!\n");
if (msg & (INTEL_GUC_RECV_MSG_CRASH_DUMP_POSTED | INTEL_GUC_RECV_MSG_EXCEPTION))
- queue_work(system_unbound_wq, &guc->dead_guc_worker);
+ queue_work(system_dfl_wq, &guc->dead_guc_worker);
return 0;
}
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c
index 8c4da526d461..1c455d84bf9d 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c
@@ -31,7 +31,7 @@ static void ct_dead_ct_worker_func(struct work_struct *w);
do { \
if (!(ct)->dead_ct_reported) { \
(ct)->dead_ct_reason |= 1 << CT_DEAD_##reason; \
- queue_work(system_unbound_wq, &(ct)->dead_ct_worker); \
+ queue_work(system_dfl_wq, &(ct)->dead_ct_worker); \
} \
} while (0)
#else
@@ -1238,7 +1238,7 @@ static int ct_handle_event(struct intel_guc_ct *ct, struct ct_incoming_msg *requ
list_add_tail(&request->link, &ct->requests.incoming);
spin_unlock_irqrestore(&ct->requests.lock, flags);
- queue_work(system_unbound_wq, &ct->requests.worker);
+ queue_work(system_dfl_wq, &ct->requests.worker);
return 0;
}
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
index 142183d3f7fb..788e59cdfac9 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
@@ -3385,7 +3385,7 @@ static void guc_context_sched_disable(struct intel_context *ce)
} else if (!intel_context_is_closed(ce) && !guc_id_pressure(guc, ce) &&
delay) {
spin_unlock_irqrestore(&ce->guc_state.lock, flags);
- mod_delayed_work(system_unbound_wq,
+ mod_delayed_work(system_dfl_wq,
&ce->guc_state.sched_disable_delay_work,
msecs_to_jiffies(delay));
} else {
@@ -3611,7 +3611,7 @@ static void guc_context_destroy(struct kref *kref)
* take the GT PM for the first time which isn't allowed from an atomic
* context.
*/
- queue_work(system_unbound_wq, &guc->submission_state.destroyed_worker);
+ queue_work(system_dfl_wq, &guc->submission_state.destroyed_worker);
}
static int guc_context_alloc(struct intel_context *ce)
@@ -4414,9 +4414,9 @@ static void start_engine(struct intel_engine_cs *engine)
{
ENGINE_WRITE_FW(engine,
RING_MODE_GEN7,
- _MASKED_BIT_ENABLE(GEN11_GFX_DISABLE_LEGACY_MODE));
+ REG_MASKED_FIELD_ENABLE(GEN11_GFX_DISABLE_LEGACY_MODE));
- ENGINE_WRITE_FW(engine, RING_MI_MODE, _MASKED_BIT_DISABLE(STOP_RING));
+ ENGINE_WRITE_FW(engine, RING_MI_MODE, REG_MASKED_FIELD_DISABLE(STOP_RING));
ENGINE_POSTING_READ(engine, RING_MI_MODE);
}
@@ -5380,7 +5380,7 @@ int intel_guc_engine_failure_process_msg(struct intel_guc *guc,
* A GT reset flushes this worker queue (G2H handler) so we must use
* another worker to trigger a GT reset.
*/
- queue_work(system_unbound_wq, &guc->submission_state.reset_fail_worker);
+ queue_work(system_dfl_wq, &guc->submission_state.reset_fail_worker);
return 0;
}
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c b/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c
index 5a9f7749acff..7fac97fe30a6 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c
@@ -1065,7 +1065,7 @@ static int uc_fw_xfer(struct intel_uc_fw *uc_fw, u32 dst_offset, u32 dma_flags)
/* Start the DMA */
intel_uncore_write_fw(uncore, DMA_CTRL,
- _MASKED_BIT_ENABLE(dma_flags | START_DMA));
+ REG_MASKED_FIELD_ENABLE(dma_flags | START_DMA));
/* Wait for DMA to finish */
ret = intel_wait_for_register_fw(uncore, DMA_CTRL, START_DMA, 0, 100, NULL);
@@ -1075,7 +1075,7 @@ static int uc_fw_xfer(struct intel_uc_fw *uc_fw, u32 dst_offset, u32 dma_flags)
intel_uncore_read_fw(uncore, DMA_CTRL));
/* Disable the bits once DMA is over */
- intel_uncore_write_fw(uncore, DMA_CTRL, _MASKED_BIT_DISABLE(dma_flags));
+ intel_uncore_write_fw(uncore, DMA_CTRL, REG_MASKED_FIELD_DISABLE(dma_flags));
intel_uncore_forcewake_put(uncore, FORCEWAKE_ALL);
diff --git a/drivers/gpu/drm/i915/gt/uc/selftest_guc.c b/drivers/gpu/drm/i915/gt/uc/selftest_guc.c
index 477c163d2660..63a75272924f 100644
--- a/drivers/gpu/drm/i915/gt/uc/selftest_guc.c
+++ b/drivers/gpu/drm/i915/gt/uc/selftest_guc.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: MIT
/*
- * Copyright �� 2021 Intel Corporation
+ * Copyright © 2021 Intel Corporation
*/
#include "gt/intel_gt_print.h"
diff --git a/drivers/gpu/drm/i915/gt/uc/selftest_guc_multi_lrc.c b/drivers/gpu/drm/i915/gt/uc/selftest_guc_multi_lrc.c
index a40e7c32e613..28e8a092f4e7 100644
--- a/drivers/gpu/drm/i915/gt/uc/selftest_guc_multi_lrc.c
+++ b/drivers/gpu/drm/i915/gt/uc/selftest_guc_multi_lrc.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: MIT
/*
- * Copyright �� 2019 Intel Corporation
+ * Copyright © 2019 Intel Corporation
*/
#include "gt/intel_gt_print.h"
diff --git a/drivers/gpu/drm/i915/gvt/cmd_parser.c b/drivers/gpu/drm/i915/gvt/cmd_parser.c
index b868a0501886..e4ed47ffd2d8 100644
--- a/drivers/gpu/drm/i915/gvt/cmd_parser.c
+++ b/drivers/gpu/drm/i915/gvt/cmd_parser.c
@@ -37,6 +37,7 @@
#include <linux/slab.h>
#include <drm/drm_print.h>
+#include <drm/intel/intel_gmd_misc_regs.h>
#include "display/i9xx_plane_regs.h"
#include "display/intel_display_regs.h"
diff --git a/drivers/gpu/drm/i915/gvt/display.c b/drivers/gpu/drm/i915/gvt/display.c
index fe4302c8cae5..00451fcb1005 100644
--- a/drivers/gpu/drm/i915/gvt/display.c
+++ b/drivers/gpu/drm/i915/gvt/display.c
@@ -39,7 +39,6 @@
#include "display/i9xx_plane_regs.h"
#include "display/intel_crt_regs.h"
#include "display/intel_cursor_regs.h"
-#include "display/intel_display.h"
#include "display/intel_display_regs.h"
#include "display/intel_dpio_phy.h"
#include "display/intel_dpll_mgr.h"
diff --git a/drivers/gpu/drm/i915/gvt/handlers.c b/drivers/gpu/drm/i915/gvt/handlers.c
index b0d8d3e74ae7..a34f56630af9 100644
--- a/drivers/gpu/drm/i915/gvt/handlers.c
+++ b/drivers/gpu/drm/i915/gvt/handlers.c
@@ -40,16 +40,18 @@
#include <drm/display/drm_dp.h>
#include <drm/drm_print.h>
+#include <drm/intel/intel_pcode_regs.h>
+#include <drm/intel/intel_gmd_interrupt_regs.h>
#include "display/bxt_dpio_phy_regs.h"
#include "display/i9xx_plane_regs.h"
#include "display/intel_crt_regs.h"
#include "display/intel_cursor_regs.h"
#include "display/intel_display_regs.h"
-#include "display/intel_display_types.h"
#include "display/intel_dmc_regs.h"
#include "display/intel_dp_aux_regs.h"
#include "display/intel_dpio_phy.h"
+#include "display/intel_dpll_mgr.h"
#include "display/intel_fbc.h"
#include "display/intel_fdi_regs.h"
#include "display/intel_pps_regs.h"
@@ -79,6 +81,9 @@
#define PCH_PP_OFF_DELAYS _MMIO(0xc720c)
#define PCH_PP_DIVISOR _MMIO(0xc7210)
+#define pipe_name(p) ((p) + 'A')
+#define port_name(p) ((p) + 'A')
+
unsigned long intel_gvt_get_device_type(struct intel_gvt *gvt)
{
struct drm_i915_private *i915 = gvt->gt->i915;
@@ -558,7 +563,7 @@ static u32 bxt_vgpu_get_dp_bitrate(struct intel_vgpu *vgpu, enum port port)
int refclk = 100000;
enum dpio_phy phy = DPIO_PHY0;
enum dpio_channel ch = DPIO_CH0;
- struct dpll clock = {};
+ int m1, m2, n, p1, p2, m, p, vco, dot;
u32 temp;
/* Port to PHY mapping is fixed, see bxt_ddi_phy_info{} */
@@ -587,30 +592,25 @@ static u32 bxt_vgpu_get_dp_bitrate(struct intel_vgpu *vgpu, enum port port)
goto out;
}
- clock.m1 = 2;
- clock.m2 = REG_FIELD_GET(PORT_PLL_M2_INT_MASK,
- vgpu_vreg_t(vgpu, BXT_PORT_PLL(phy, ch, 0))) << 22;
+ m1 = 2;
+ m2 = REG_FIELD_GET(PORT_PLL_M2_INT_MASK, vgpu_vreg_t(vgpu, BXT_PORT_PLL(phy, ch, 0))) << 22;
if (vgpu_vreg_t(vgpu, BXT_PORT_PLL(phy, ch, 3)) & PORT_PLL_M2_FRAC_ENABLE)
- clock.m2 |= REG_FIELD_GET(PORT_PLL_M2_FRAC_MASK,
- vgpu_vreg_t(vgpu, BXT_PORT_PLL(phy, ch, 2)));
- clock.n = REG_FIELD_GET(PORT_PLL_N_MASK,
- vgpu_vreg_t(vgpu, BXT_PORT_PLL(phy, ch, 1)));
- clock.p1 = REG_FIELD_GET(PORT_PLL_P1_MASK,
- vgpu_vreg_t(vgpu, BXT_PORT_PLL_EBB_0(phy, ch)));
- clock.p2 = REG_FIELD_GET(PORT_PLL_P2_MASK,
- vgpu_vreg_t(vgpu, BXT_PORT_PLL_EBB_0(phy, ch)));
- clock.m = clock.m1 * clock.m2;
- clock.p = clock.p1 * clock.p2 * 5;
-
- if (clock.n == 0 || clock.p == 0) {
+ m2 |= REG_FIELD_GET(PORT_PLL_M2_FRAC_MASK, vgpu_vreg_t(vgpu, BXT_PORT_PLL(phy, ch, 2)));
+ n = REG_FIELD_GET(PORT_PLL_N_MASK, vgpu_vreg_t(vgpu, BXT_PORT_PLL(phy, ch, 1)));
+ p1 = REG_FIELD_GET(PORT_PLL_P1_MASK, vgpu_vreg_t(vgpu, BXT_PORT_PLL_EBB_0(phy, ch)));
+ p2 = REG_FIELD_GET(PORT_PLL_P2_MASK, vgpu_vreg_t(vgpu, BXT_PORT_PLL_EBB_0(phy, ch)));
+ m = m1 * m2;
+ p = p1 * p2 * 5;
+
+ if (n == 0 || p == 0) {
gvt_dbg_dpy("vgpu-%d PORT_%c PLL has invalid divider\n", vgpu->id, port_name(port));
goto out;
}
- clock.vco = DIV_ROUND_CLOSEST_ULL(mul_u32_u32(refclk, clock.m), clock.n << 22);
- clock.dot = DIV_ROUND_CLOSEST(clock.vco, clock.p);
+ vco = DIV_ROUND_CLOSEST_ULL(mul_u32_u32(refclk, m), n << 22);
+ dot = DIV_ROUND_CLOSEST(vco, p);
- dp_br = clock.dot;
+ dp_br = dot;
out:
return dp_br;
@@ -2047,10 +2047,10 @@ static int ring_mode_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
bool enable_execlist;
int ret;
- (*(u32 *)p_data) &= ~_MASKED_BIT_ENABLE(1);
+ (*(u32 *)p_data) &= ~REG_MASKED_FIELD_ENABLE(1);
if (IS_COFFEELAKE(vgpu->gvt->gt->i915) ||
IS_COMETLAKE(vgpu->gvt->gt->i915))
- (*(u32 *)p_data) &= ~_MASKED_BIT_ENABLE(2);
+ (*(u32 *)p_data) &= ~REG_MASKED_FIELD_ENABLE(2);
write_vreg(vgpu, offset, p_data, bytes);
if (IS_MASKED_BITS_ENABLED(data, 1)) {
@@ -2139,7 +2139,7 @@ static int ring_reset_ctl_write(struct intel_vgpu *vgpu,
if (IS_MASKED_BITS_ENABLED(data, RESET_CTL_REQUEST_RESET))
data |= RESET_CTL_READY_TO_RESET;
- else if (data & _MASKED_BIT_DISABLE(RESET_CTL_REQUEST_RESET))
+ else if (data & REG_MASKED_FIELD_DISABLE(RESET_CTL_REQUEST_RESET))
data &= ~RESET_CTL_READY_TO_RESET;
vgpu_vreg(vgpu, offset) = data;
@@ -2152,7 +2152,7 @@ static int csfe_chicken1_mmio_write(struct intel_vgpu *vgpu,
{
u32 data = *(u32 *)p_data;
- (*(u32 *)p_data) &= ~_MASKED_BIT_ENABLE(0x18);
+ (*(u32 *)p_data) &= ~REG_MASKED_FIELD_ENABLE(0x18);
write_vreg(vgpu, offset, p_data, bytes);
if (IS_MASKED_BITS_ENABLED(data, 0x10) ||
@@ -2534,7 +2534,7 @@ static int init_bdw_mmio_info(struct intel_gvt *gvt)
#define RING_REG(base) _MMIO((base) + 0xd0)
MMIO_RING_F(RING_REG, 4, F_RO, 0,
- ~_MASKED_BIT_ENABLE(RESET_CTL_REQUEST_RESET), D_BDW_PLUS, NULL,
+ ~REG_MASKED_FIELD_ENABLE(RESET_CTL_REQUEST_RESET), D_BDW_PLUS, NULL,
ring_reset_ctl_write);
#undef RING_REG
diff --git a/drivers/gpu/drm/i915/gvt/interrupt.c b/drivers/gpu/drm/i915/gvt/interrupt.c
index 91d22b1c62e2..f85113218037 100644
--- a/drivers/gpu/drm/i915/gvt/interrupt.c
+++ b/drivers/gpu/drm/i915/gvt/interrupt.c
@@ -32,6 +32,7 @@
#include <linux/eventfd.h>
#include <drm/drm_print.h>
+#include <drm/intel/intel_gmd_interrupt_regs.h>
#include "display/intel_display_regs.h"
diff --git a/drivers/gpu/drm/i915/gvt/mmio_context.c b/drivers/gpu/drm/i915/gvt/mmio_context.c
index d4e9d485d382..a93999ba8092 100644
--- a/drivers/gpu/drm/i915/gvt/mmio_context.c
+++ b/drivers/gpu/drm/i915/gvt/mmio_context.c
@@ -34,6 +34,7 @@
*/
#include <drm/drm_print.h>
+#include <drm/intel/intel_gmd_misc_regs.h>
#include "gt/intel_context.h"
#include "gt/intel_engine_regs.h"
@@ -475,7 +476,7 @@ bool is_inhibit_context(struct intel_context *ce)
{
const u32 *reg_state = ce->lrc_reg_state;
u32 inhibit_mask =
- _MASKED_BIT_ENABLE(CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT);
+ REG_MASKED_FIELD_ENABLE(CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT);
return inhibit_mask ==
(reg_state[CTX_CONTEXT_CONTROL_VAL] & inhibit_mask);
diff --git a/drivers/gpu/drm/i915/gvt/reg.h b/drivers/gpu/drm/i915/gvt/reg.h
index 90d8eb1761a3..a4cf15e43990 100644
--- a/drivers/gpu/drm/i915/gvt/reg.h
+++ b/drivers/gpu/drm/i915/gvt/reg.h
@@ -91,9 +91,9 @@
((((bit) & 0xffff0000) == 0) && !!((val) & (((bit) << 16))))
#define IS_MASKED_BITS_ENABLED(_val, _b) \
- (((_val) & _MASKED_BIT_ENABLE(_b)) == _MASKED_BIT_ENABLE(_b))
+ (((_val) & REG_MASKED_FIELD_ENABLE(_b)) == REG_MASKED_FIELD_ENABLE(_b))
#define IS_MASKED_BITS_DISABLED(_val, _b) \
- ((_val) & _MASKED_BIT_DISABLE(_b))
+ ((_val) & REG_MASKED_FIELD_DISABLE(_b))
#define FORCEWAKE_RENDER_GEN9_REG 0xa278
#define FORCEWAKE_ACK_RENDER_GEN9_REG 0x0D84
diff --git a/drivers/gpu/drm/i915/i915_active.c b/drivers/gpu/drm/i915/i915_active.c
index cd44cbfb53b5..5cb7a72774a0 100644
--- a/drivers/gpu/drm/i915/i915_active.c
+++ b/drivers/gpu/drm/i915/i915_active.c
@@ -193,7 +193,7 @@ active_retire(struct i915_active *ref)
return;
if (ref->flags & I915_ACTIVE_RETIRE_SLEEPS) {
- queue_work(system_unbound_wq, &ref->work);
+ queue_work(system_dfl_wq, &ref->work);
return;
}
diff --git a/drivers/gpu/drm/i915/i915_bo.c b/drivers/gpu/drm/i915/i915_bo.c
new file mode 100644
index 000000000000..1789f7cab05c
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_bo.c
@@ -0,0 +1,156 @@
+// SPDX-License-Identifier: MIT
+/* Copyright © 2024 Intel Corporation */
+
+#include <drm/drm_panic.h>
+#include <drm/drm_print.h>
+#include <drm/intel/display_parent_interface.h>
+
+#include "display/intel_fb.h"
+#include "gem/i915_gem_mman.h"
+#include "gem/i915_gem_object.h"
+#include "gem/i915_gem_object_frontbuffer.h"
+#include "pxp/intel_pxp.h"
+
+#include "i915_bo.h"
+#include "i915_debugfs.h"
+#include "i915_drv.h"
+
+static bool i915_bo_is_tiled(struct drm_gem_object *obj)
+{
+ return i915_gem_object_is_tiled(to_intel_bo(obj));
+}
+
+static bool i915_bo_is_userptr(struct drm_gem_object *obj)
+{
+ return i915_gem_object_is_userptr(to_intel_bo(obj));
+}
+
+static bool i915_bo_is_shmem(struct drm_gem_object *obj)
+{
+ return i915_gem_object_is_shmem(to_intel_bo(obj));
+}
+
+static bool i915_bo_is_protected(struct drm_gem_object *obj)
+{
+ return i915_gem_object_is_protected(to_intel_bo(obj));
+}
+
+static int i915_bo_key_check(struct drm_gem_object *obj)
+{
+ return intel_pxp_key_check(obj, false);
+}
+
+static int i915_bo_fb_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
+{
+ return i915_gem_fb_mmap(to_intel_bo(obj), vma);
+}
+
+static int i915_bo_read_from_page(struct drm_gem_object *obj, u64 offset, void *dst, int size)
+{
+ return i915_gem_object_read_from_page(to_intel_bo(obj), offset, dst, size);
+}
+
+static void i915_bo_describe(struct seq_file *m, struct drm_gem_object *obj)
+{
+ i915_debugfs_describe_obj(m, to_intel_bo(obj));
+}
+
+static int i915_bo_framebuffer_init(struct drm_gem_object *_obj,
+ struct drm_mode_fb_cmd2 *mode_cmd)
+{
+ struct drm_i915_gem_object *obj = to_intel_bo(_obj);
+ struct drm_i915_private *i915 = to_i915(obj->base.dev);
+ unsigned int tiling, stride;
+
+ i915_gem_object_lock(obj, NULL);
+ tiling = i915_gem_object_get_tiling(obj);
+ stride = i915_gem_object_get_stride(obj);
+ i915_gem_object_unlock(obj);
+
+ if (mode_cmd->flags & DRM_MODE_FB_MODIFIERS) {
+ /*
+ * If there's a fence, enforce that
+ * the fb modifier and tiling mode match.
+ */
+ if (tiling != I915_TILING_NONE &&
+ tiling != intel_fb_modifier_to_tiling(mode_cmd->modifier[0])) {
+ drm_dbg_kms(&i915->drm,
+ "tiling_mode doesn't match fb modifier\n");
+ return -EINVAL;
+ }
+ } else {
+ if (tiling == I915_TILING_X) {
+ mode_cmd->modifier[0] = I915_FORMAT_MOD_X_TILED;
+ } else if (tiling == I915_TILING_Y) {
+ drm_dbg_kms(&i915->drm,
+ "No Y tiling for legacy addfb\n");
+ return -EINVAL;
+ }
+ }
+
+ /*
+ * gen2/3 display engine uses the fence if present,
+ * so the tiling mode must match the fb modifier exactly.
+ */
+ if (GRAPHICS_VER(i915) < 4 &&
+ tiling != intel_fb_modifier_to_tiling(mode_cmd->modifier[0])) {
+ drm_dbg_kms(&i915->drm,
+ "tiling_mode must match fb modifier exactly on gen2/3\n");
+ return -EINVAL;
+ }
+
+ /*
+ * If there's a fence, enforce that
+ * the fb pitch and fence stride match.
+ */
+ if (tiling != I915_TILING_NONE && mode_cmd->pitches[0] != stride) {
+ drm_dbg_kms(&i915->drm,
+ "pitch (%d) must match tiling stride (%d)\n",
+ mode_cmd->pitches[0], stride);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static void i915_bo_framebuffer_fini(struct drm_gem_object *obj)
+{
+ /* Nothing to do for i915 */
+}
+
+static struct drm_gem_object *
+i915_bo_framebuffer_lookup(struct drm_device *drm,
+ struct drm_file *filp,
+ const struct drm_mode_fb_cmd2 *mode_cmd)
+{
+ struct drm_i915_private *i915 = to_i915(drm);
+ struct drm_i915_gem_object *obj;
+
+ obj = i915_gem_object_lookup(filp, mode_cmd->handles[0]);
+ if (!obj)
+ return ERR_PTR(-ENOENT);
+
+ /* object is backed with LMEM for discrete */
+ if (HAS_LMEM(i915) && !i915_gem_object_can_migrate(obj, INTEL_REGION_LMEM_0)) {
+ /* object is "remote", not in local memory */
+ i915_gem_object_put(obj);
+ drm_dbg_kms(&i915->drm, "framebuffer must reside in local memory\n");
+ return ERR_PTR(-EREMOTE);
+ }
+
+ return intel_bo_to_drm_bo(obj);
+}
+
+const struct intel_display_bo_interface i915_display_bo_interface = {
+ .is_tiled = i915_bo_is_tiled,
+ .is_userptr = i915_bo_is_userptr,
+ .is_shmem = i915_bo_is_shmem,
+ .is_protected = i915_bo_is_protected,
+ .key_check = i915_bo_key_check,
+ .fb_mmap = i915_bo_fb_mmap,
+ .read_from_page = i915_bo_read_from_page,
+ .describe = i915_bo_describe,
+ .framebuffer_init = i915_bo_framebuffer_init,
+ .framebuffer_fini = i915_bo_framebuffer_fini,
+ .framebuffer_lookup = i915_bo_framebuffer_lookup,
+};
diff --git a/drivers/gpu/drm/i915/i915_bo.h b/drivers/gpu/drm/i915/i915_bo.h
new file mode 100644
index 000000000000..57255d052dd9
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_bo.h
@@ -0,0 +1,9 @@
+/* SPDX-License-Identifier: MIT */
+/* Copyright © 2026 Intel Corporation */
+
+#ifndef __I915_BO_H__
+#define __I915_BO_H__
+
+extern const struct intel_display_bo_interface i915_display_bo_interface;
+
+#endif /* __I915_BO_H__ */
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 42f6b44f0027..4778ba664ec7 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -33,6 +33,7 @@
#include <drm/drm_debugfs.h>
#include <drm/drm_print.h>
+#include <drm/intel/intel_gmd_misc_regs.h>
#include "gem/i915_gem_context.h"
#include "gt/intel_gt.h"
diff --git a/drivers/gpu/drm/i915/i915_dpt.c b/drivers/gpu/drm/i915/i915_dpt.c
new file mode 100644
index 000000000000..9f47bb563c85
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_dpt.c
@@ -0,0 +1,277 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2021 Intel Corporation
+ */
+
+#include <drm/drm_print.h>
+#include <drm/intel/display_parent_interface.h>
+
+#include "display/intel_display_core.h"
+#include "gem/i915_gem_domain.h"
+#include "gem/i915_gem_internal.h"
+#include "gem/i915_gem_lmem.h"
+#include "gt/gen8_ppgtt.h"
+
+#include "i915_dpt.h"
+#include "i915_drv.h"
+
+struct intel_dpt {
+ struct i915_address_space vm;
+
+ struct drm_i915_gem_object *obj;
+ struct i915_vma *vma;
+ void __iomem *iomem;
+};
+
+#define i915_is_dpt(vm) ((vm)->is_dpt)
+
+static inline struct intel_dpt *
+i915_vm_to_dpt(struct i915_address_space *vm)
+{
+ BUILD_BUG_ON(offsetof(struct intel_dpt, vm));
+ drm_WARN_ON(&vm->i915->drm, !i915_is_dpt(vm));
+ return container_of(vm, struct intel_dpt, vm);
+}
+
+struct i915_address_space *i915_dpt_to_vm(struct intel_dpt *dpt)
+{
+ return &dpt->vm;
+}
+
+static void gen8_set_pte(void __iomem *addr, gen8_pte_t pte)
+{
+ writeq(pte, addr);
+}
+
+static void dpt_insert_page(struct i915_address_space *vm,
+ dma_addr_t addr,
+ u64 offset,
+ unsigned int pat_index,
+ u32 flags)
+{
+ struct intel_dpt *dpt = i915_vm_to_dpt(vm);
+ gen8_pte_t __iomem *base = dpt->iomem;
+
+ gen8_set_pte(base + offset / I915_GTT_PAGE_SIZE,
+ vm->pte_encode(addr, pat_index, flags));
+}
+
+static void dpt_insert_entries(struct i915_address_space *vm,
+ struct i915_vma_resource *vma_res,
+ unsigned int pat_index,
+ u32 flags)
+{
+ struct intel_dpt *dpt = i915_vm_to_dpt(vm);
+ gen8_pte_t __iomem *base = dpt->iomem;
+ const gen8_pte_t pte_encode = vm->pte_encode(0, pat_index, flags);
+ struct sgt_iter sgt_iter;
+ dma_addr_t addr;
+ int i;
+
+ /*
+ * Note that we ignore PTE_READ_ONLY here. The caller must be careful
+ * not to allow the user to override access to a read only page.
+ */
+
+ i = vma_res->start / I915_GTT_PAGE_SIZE;
+ for_each_sgt_daddr(addr, sgt_iter, vma_res->bi.pages)
+ gen8_set_pte(&base[i++], pte_encode | addr);
+}
+
+static void dpt_clear_range(struct i915_address_space *vm,
+ u64 start, u64 length)
+{
+}
+
+static void dpt_bind_vma(struct i915_address_space *vm,
+ struct i915_vm_pt_stash *stash,
+ struct i915_vma_resource *vma_res,
+ unsigned int pat_index,
+ u32 flags)
+{
+ u32 pte_flags;
+
+ if (vma_res->bound_flags)
+ return;
+
+ /* Applicable to VLV (gen8+ do not support RO in the GGTT) */
+ pte_flags = 0;
+ if (vm->has_read_only && vma_res->bi.readonly)
+ pte_flags |= PTE_READ_ONLY;
+ if (vma_res->bi.lmem)
+ pte_flags |= PTE_LM;
+
+ vm->insert_entries(vm, vma_res, pat_index, pte_flags);
+
+ vma_res->page_sizes_gtt = I915_GTT_PAGE_SIZE;
+
+ /*
+ * Without aliasing PPGTT there's no difference between
+ * GLOBAL/LOCAL_BIND, it's all the same ptes. Hence unconditionally
+ * upgrade to both bound if we bind either to avoid double-binding.
+ */
+ vma_res->bound_flags = I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND;
+}
+
+static void dpt_unbind_vma(struct i915_address_space *vm,
+ struct i915_vma_resource *vma_res)
+{
+ vm->clear_range(vm, vma_res->start, vma_res->vma_size);
+}
+
+static void dpt_cleanup(struct i915_address_space *vm)
+{
+ struct intel_dpt *dpt = i915_vm_to_dpt(vm);
+
+ i915_gem_object_put(dpt->obj);
+}
+
+struct i915_vma *i915_dpt_pin_to_ggtt(struct intel_dpt *dpt, unsigned int alignment)
+{
+ struct drm_i915_private *i915 = dpt->vm.i915;
+ struct intel_display *display = i915->display;
+ struct ref_tracker *wakeref;
+ struct i915_vma *vma;
+ void __iomem *iomem;
+ struct i915_gem_ww_ctx ww;
+ u64 pin_flags = 0;
+ int err;
+
+ if (i915_gem_object_is_stolen(dpt->obj))
+ pin_flags |= PIN_MAPPABLE;
+
+ wakeref = intel_runtime_pm_get(&i915->runtime_pm);
+ atomic_inc(&display->restore.pending_fb_pin);
+
+ for_i915_gem_ww(&ww, err, true) {
+ err = i915_gem_object_lock(dpt->obj, &ww);
+ if (err)
+ continue;
+
+ vma = i915_gem_object_ggtt_pin_ww(dpt->obj, &ww, NULL, 0,
+ alignment, pin_flags);
+ if (IS_ERR(vma)) {
+ err = PTR_ERR(vma);
+ continue;
+ }
+
+ iomem = i915_vma_pin_iomap(vma);
+ i915_vma_unpin(vma);
+
+ if (IS_ERR(iomem)) {
+ err = PTR_ERR(iomem);
+ continue;
+ }
+
+ dpt->vma = vma;
+ dpt->iomem = iomem;
+
+ i915_vma_get(vma);
+ }
+
+ dpt->obj->mm.dirty = true;
+
+ atomic_dec(&display->restore.pending_fb_pin);
+ intel_runtime_pm_put(&i915->runtime_pm, wakeref);
+
+ return err ? ERR_PTR(err) : vma;
+}
+
+void i915_dpt_unpin_from_ggtt(struct intel_dpt *dpt)
+{
+ i915_vma_unpin_iomap(dpt->vma);
+ i915_vma_put(dpt->vma);
+}
+
+static struct intel_dpt *i915_dpt_create(struct drm_gem_object *obj, size_t size)
+{
+ struct drm_i915_private *i915 = to_i915(obj->dev);
+ struct drm_i915_gem_object *dpt_obj;
+ struct i915_address_space *vm;
+ struct intel_dpt *dpt;
+ int ret;
+
+ if (!size)
+ size = DIV_ROUND_UP_ULL(obj->size, I915_GTT_PAGE_SIZE);
+
+ size = round_up(size * sizeof(gen8_pte_t), I915_GTT_PAGE_SIZE);
+
+ dpt_obj = i915_gem_object_create_lmem(i915, size, I915_BO_ALLOC_CONTIGUOUS);
+ if (IS_ERR(dpt_obj) && i915_ggtt_has_aperture(to_gt(i915)->ggtt))
+ dpt_obj = i915_gem_object_create_stolen(i915, size);
+ if (IS_ERR(dpt_obj) && !HAS_LMEM(i915)) {
+ drm_dbg_kms(&i915->drm, "Allocating dpt from smem\n");
+ dpt_obj = i915_gem_object_create_shmem(i915, size);
+ }
+ if (IS_ERR(dpt_obj))
+ return ERR_CAST(dpt_obj);
+
+ ret = i915_gem_object_lock_interruptible(dpt_obj, NULL);
+ if (!ret) {
+ ret = i915_gem_object_set_cache_level(dpt_obj, I915_CACHE_NONE);
+ i915_gem_object_unlock(dpt_obj);
+ }
+ if (ret) {
+ i915_gem_object_put(dpt_obj);
+ return ERR_PTR(ret);
+ }
+
+ dpt = kzalloc_obj(*dpt);
+ if (!dpt) {
+ i915_gem_object_put(dpt_obj);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ vm = &dpt->vm;
+
+ vm->gt = to_gt(i915);
+ vm->i915 = i915;
+ vm->dma = i915->drm.dev;
+ vm->total = (size / sizeof(gen8_pte_t)) * I915_GTT_PAGE_SIZE;
+ vm->is_dpt = true;
+
+ i915_address_space_init(vm, VM_CLASS_DPT);
+
+ vm->insert_page = dpt_insert_page;
+ vm->clear_range = dpt_clear_range;
+ vm->insert_entries = dpt_insert_entries;
+ vm->cleanup = dpt_cleanup;
+
+ vm->vma_ops.bind_vma = dpt_bind_vma;
+ vm->vma_ops.unbind_vma = dpt_unbind_vma;
+
+ vm->pte_encode = vm->gt->ggtt->vm.pte_encode;
+
+ dpt->obj = dpt_obj;
+ dpt->obj->is_dpt = true;
+
+ return dpt;
+}
+
+static void i915_dpt_destroy(struct intel_dpt *dpt)
+{
+ dpt->obj->is_dpt = false;
+ i915_vm_put(&dpt->vm);
+}
+
+static void i915_dpt_suspend(struct intel_dpt *dpt)
+{
+ i915_ggtt_suspend_vm(&dpt->vm, true);
+}
+
+static void i915_dpt_resume(struct intel_dpt *dpt)
+{
+ i915_ggtt_resume_vm(&dpt->vm, true);
+}
+
+u64 i915_dpt_offset(struct i915_vma *dpt_vma)
+{
+ return i915_vma_offset(dpt_vma);
+}
+
+const struct intel_display_dpt_interface i915_display_dpt_interface = {
+ .create = i915_dpt_create,
+ .destroy = i915_dpt_destroy,
+ .suspend = i915_dpt_suspend,
+ .resume = i915_dpt_resume,
+};
diff --git a/drivers/gpu/drm/i915/i915_dpt.h b/drivers/gpu/drm/i915/i915_dpt.h
new file mode 100644
index 000000000000..08dbe444fe18
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_dpt.h
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: MIT */
+/* Copyright 2026 Intel Corporation */
+
+#ifndef __I915_DPT_H__
+#define __I915_DPT_H__
+
+#include <linux/types.h>
+
+struct i915_address_space;
+struct i915_vma;
+struct intel_dpt;
+
+struct i915_address_space *i915_dpt_to_vm(struct intel_dpt *dpt);
+struct i915_vma *i915_dpt_pin_to_ggtt(struct intel_dpt *dpt, unsigned int alignment);
+void i915_dpt_unpin_from_ggtt(struct intel_dpt *dpt);
+u64 i915_dpt_offset(struct i915_vma *dpt_vma);
+
+extern const struct intel_display_dpt_interface i915_display_dpt_interface;
+
+#endif /* __I915_DPT_H__ */
diff --git a/drivers/gpu/drm/i915/i915_driver.c b/drivers/gpu/drm/i915/i915_driver.c
index f0105c5b49a7..385a634c3ed0 100644
--- a/drivers/gpu/drm/i915/i915_driver.c
+++ b/drivers/gpu/drm/i915/i915_driver.c
@@ -48,6 +48,7 @@
#include <drm/drm_probe_helper.h>
#include <drm/intel/display_member.h>
#include <drm/intel/display_parent_interface.h>
+#include <drm/intel/intel_pcode_regs.h>
#include "display/i9xx_display_sr.h"
#include "display/intel_bw.h"
@@ -77,6 +78,7 @@
#include "gem/i915_gem_dmabuf.h"
#include "gem/i915_gem_ioctls.h"
#include "gem/i915_gem_mman.h"
+#include "gem/i915_gem_object_frontbuffer.h"
#include "gem/i915_gem_pm.h"
#include "gt/intel_gt.h"
#include "gt/intel_gt_pm.h"
@@ -88,11 +90,14 @@
#include "pxp/intel_pxp_debugfs.h"
#include "pxp/intel_pxp_pm.h"
+#include "i915_bo.h"
#include "i915_debugfs.h"
#include "i915_display_pc8.h"
+#include "i915_dpt.h"
#include "i915_driver.h"
#include "i915_drm_client.h"
#include "i915_drv.h"
+#include "i915_dsb_buffer.h"
#include "i915_edram.h"
#include "i915_file_private.h"
#include "i915_getparam.h"
@@ -104,6 +109,7 @@
#include "i915_ioctl.h"
#include "i915_irq.h"
#include "i915_memcpy.h"
+#include "i915_overlay.h"
#include "i915_panic.h"
#include "i915_perf.h"
#include "i915_query.h"
@@ -147,10 +153,11 @@ static int i915_workqueues_init(struct drm_i915_private *dev_priv)
/*
* The unordered i915 workqueue should be used for all work
* scheduling that do not require running in order, which used
- * to be scheduled on the system_wq before moving to a driver
+ * to be scheduled on the system_percpu_wq before moving to a driver
* instance due deprecation of flush_scheduled_work().
*/
- dev_priv->unordered_wq = alloc_workqueue("i915-unordered", 0, 0);
+ dev_priv->unordered_wq = alloc_workqueue("i915-unordered", WQ_PERCPU,
+ 0);
if (dev_priv->unordered_wq == NULL)
goto out_free_wq;
@@ -556,10 +563,6 @@ static int i915_driver_hw_probe(struct drm_i915_private *dev_priv)
drm_dbg(&dev_priv->drm, "can't enable MSI");
}
- ret = intel_gvt_init(dev_priv);
- if (ret)
- goto err_msi;
-
intel_opregion_setup(display);
ret = i915_pcode_init(dev_priv);
@@ -580,7 +583,6 @@ static int i915_driver_hw_probe(struct drm_i915_private *dev_priv)
err_opregion:
intel_opregion_cleanup(display);
-err_msi:
if (pdev->msi_enabled)
pci_disable_msi(pdev);
err_mem_regions:
@@ -764,14 +766,21 @@ static bool vgpu_active(struct drm_device *drm)
}
static const struct intel_display_parent_interface parent = {
+ .bo = &i915_display_bo_interface,
+ .dpt = &i915_display_dpt_interface,
+ .dsb = &i915_display_dsb_interface,
+ .frontbuffer = &i915_display_frontbuffer_interface,
.hdcp = &i915_display_hdcp_interface,
.initial_plane = &i915_display_initial_plane_interface,
.irq = &i915_display_irq_interface,
+ .overlay = &i915_display_overlay_interface,
.panic = &i915_display_panic_interface,
.pc8 = &i915_display_pc8_interface,
+ .pcode = &i915_display_pcode_interface,
.rpm = &i915_display_rpm_interface,
.rps = &i915_display_rps_interface,
.stolen = &i915_display_stolen_interface,
+ .vma = &i915_display_vma_interface,
.fence_priority_display = fence_priority_display,
.has_auxccs = has_auxccs,
@@ -868,9 +877,13 @@ int i915_driver_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (ret < 0)
goto out_cleanup_mmio;
+ ret = intel_gvt_init(i915);
+ if (ret)
+ goto out_cleanup_hw;
+
ret = intel_display_driver_probe_noirq(display);
if (ret < 0)
- goto out_cleanup_hw;
+ goto out_cleanup_gvt;
ret = intel_irq_install(i915);
if (ret)
@@ -919,6 +932,8 @@ out_cleanup_irq:
intel_irq_uninstall(i915);
out_cleanup_modeset:
intel_display_driver_remove_nogem(display);
+out_cleanup_gvt:
+ intel_gvt_driver_remove(i915);
out_cleanup_hw:
i915_driver_hw_remove(i915);
intel_memory_regions_driver_release(i915);
@@ -926,7 +941,6 @@ out_cleanup_hw:
i915_gem_drain_freed_objects(i915);
i915_ggtt_driver_late_release(i915);
out_cleanup_mmio:
- intel_gvt_driver_remove(i915);
i915_driver_mmio_release(i915);
out_runtime_pm_put:
enable_rpm_wakeref_asserts(&i915->runtime_pm);
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 44ba620325bc..dafee3dcd1c5 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -61,6 +61,7 @@
#include "intel_uncore.h"
struct drm_i915_clock_gating_funcs;
+struct i915_overlay;
struct intel_display;
struct intel_pxp;
struct vlv_s0ix_state;
@@ -248,7 +249,7 @@ struct drm_i915_private {
*
* This workqueue should be used for all unordered work
* scheduling within i915, which used to be scheduled on the
- * system_wq before moving to a driver instance due
+ * system_percpu_wq before moving to a driver instance due
* deprecation of flush_scheduled_work().
*/
struct workqueue_struct *unordered_wq;
@@ -307,6 +308,8 @@ struct drm_i915_private {
struct intel_pxp *pxp;
+ struct i915_overlay *overlay;
+
struct i915_pmu pmu;
/* The TTM device structure. */
diff --git a/drivers/gpu/drm/i915/display/intel_dsb_buffer.c b/drivers/gpu/drm/i915/i915_dsb_buffer.c
index 9b6060af250d..b797e0ccb70c 100644
--- a/drivers/gpu/drm/i915/display/intel_dsb_buffer.c
+++ b/drivers/gpu/drm/i915/i915_dsb_buffer.c
@@ -3,11 +3,13 @@
* Copyright 2023, Intel Corporation.
*/
+#include <drm/intel/display_parent_interface.h>
+
#include "gem/i915_gem_internal.h"
#include "gem/i915_gem_lmem.h"
#include "i915_drv.h"
+#include "i915_dsb_buffer.h"
#include "i915_vma.h"
-#include "intel_dsb_buffer.h"
struct intel_dsb_buffer {
u32 *cmd_buf;
@@ -15,29 +17,29 @@ struct intel_dsb_buffer {
size_t buf_size;
};
-u32 intel_dsb_buffer_ggtt_offset(struct intel_dsb_buffer *dsb_buf)
+static u32 intel_dsb_buffer_ggtt_offset(struct intel_dsb_buffer *dsb_buf)
{
return i915_ggtt_offset(dsb_buf->vma);
}
-void intel_dsb_buffer_write(struct intel_dsb_buffer *dsb_buf, u32 idx, u32 val)
+static void intel_dsb_buffer_write(struct intel_dsb_buffer *dsb_buf, u32 idx, u32 val)
{
dsb_buf->cmd_buf[idx] = val;
}
-u32 intel_dsb_buffer_read(struct intel_dsb_buffer *dsb_buf, u32 idx)
+static u32 intel_dsb_buffer_read(struct intel_dsb_buffer *dsb_buf, u32 idx)
{
return dsb_buf->cmd_buf[idx];
}
-void intel_dsb_buffer_memset(struct intel_dsb_buffer *dsb_buf, u32 idx, u32 val, size_t size)
+static void intel_dsb_buffer_fill(struct intel_dsb_buffer *dsb_buf, u32 idx, u32 val, size_t size)
{
WARN_ON(idx > (dsb_buf->buf_size - size) / sizeof(*dsb_buf->cmd_buf));
memset(&dsb_buf->cmd_buf[idx], val, size);
}
-struct intel_dsb_buffer *intel_dsb_buffer_create(struct drm_device *drm, size_t size)
+static struct intel_dsb_buffer *intel_dsb_buffer_create(struct drm_device *drm, size_t size)
{
struct drm_i915_private *i915 = to_i915(drm);
struct intel_dsb_buffer *dsb_buf;
@@ -93,13 +95,23 @@ err:
return ERR_PTR(ret);
}
-void intel_dsb_buffer_cleanup(struct intel_dsb_buffer *dsb_buf)
+static void intel_dsb_buffer_cleanup(struct intel_dsb_buffer *dsb_buf)
{
i915_vma_unpin_and_release(&dsb_buf->vma, I915_VMA_RELEASE_MAP);
kfree(dsb_buf);
}
-void intel_dsb_buffer_flush_map(struct intel_dsb_buffer *dsb_buf)
+static void intel_dsb_buffer_flush_map(struct intel_dsb_buffer *dsb_buf)
{
i915_gem_object_flush_map(dsb_buf->vma->obj);
}
+
+const struct intel_display_dsb_interface i915_display_dsb_interface = {
+ .ggtt_offset = intel_dsb_buffer_ggtt_offset,
+ .write = intel_dsb_buffer_write,
+ .read = intel_dsb_buffer_read,
+ .fill = intel_dsb_buffer_fill,
+ .create = intel_dsb_buffer_create,
+ .cleanup = intel_dsb_buffer_cleanup,
+ .flush_map = intel_dsb_buffer_flush_map,
+};
diff --git a/drivers/gpu/drm/i915/i915_dsb_buffer.h b/drivers/gpu/drm/i915/i915_dsb_buffer.h
new file mode 100644
index 000000000000..a01b4d8de947
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_dsb_buffer.h
@@ -0,0 +1,9 @@
+/* SPDX-License-Identifier: MIT */
+/* Copyright © 2026 Intel Corporation */
+
+#ifndef __I915_DSB_BUFFER_H__
+#define __I915_DSB_BUFFER_H__
+
+extern const struct intel_display_dsb_interface i915_display_dsb_interface;
+
+#endif /* __I915_DSB_BUFFER_H__ */
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 160733619a4a..761491750914 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -579,7 +579,7 @@ i915_gem_gtt_pwrite_fast(struct drm_i915_gem_object *obj,
goto out_rpm;
}
- i915_gem_object_invalidate_frontbuffer(obj, ORIGIN_CPU);
+ i915_gem_object_frontbuffer_invalidate(obj, ORIGIN_CPU);
user_data = u64_to_user_ptr(args->data_ptr);
offset = args->offset;
@@ -626,7 +626,7 @@ i915_gem_gtt_pwrite_fast(struct drm_i915_gem_object *obj,
}
intel_gt_flush_ggtt_writes(ggtt->vm.gt);
- i915_gem_object_flush_frontbuffer(obj, ORIGIN_CPU);
+ i915_gem_object_frontbuffer_flush(obj, ORIGIN_CPU);
i915_gem_gtt_cleanup(obj, &node, vma);
out_rpm:
@@ -714,7 +714,7 @@ i915_gem_shmem_pwrite(struct drm_i915_gem_object *obj,
offset = 0;
}
- i915_gem_object_flush_frontbuffer(obj, ORIGIN_CPU);
+ i915_gem_object_frontbuffer_flush(obj, ORIGIN_CPU);
i915_gem_object_unpin_pages(obj);
return ret;
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c
index a99b4e45d26c..0469c4467f2b 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.c
+++ b/drivers/gpu/drm/i915/i915_gpu_error.c
@@ -824,9 +824,6 @@ static void err_print_gt_global(struct drm_i915_error_state_buf *m,
err_printf(m, "FAULT_TLB_DATA: 0x%08x 0x%08x\n",
gt->fault_data1, gt->fault_data0);
- if (GRAPHICS_VER(m->i915) == 7)
- err_printf(m, "ERR_INT: 0x%08x\n", gt->err_int);
-
if (IS_GRAPHICS_VER(m->i915, 8, 11))
err_printf(m, "GTT_CACHE_EN: 0x%08x\n", gt->gtt_cache);
@@ -1929,9 +1926,6 @@ static void gt_record_global_regs(struct intel_gt_coredump *gt)
if (IS_VALLEYVIEW(i915))
gt->forcewake = intel_uncore_read_fw(uncore, FORCEWAKE_VLV);
- if (GRAPHICS_VER(i915) == 7)
- gt->err_int = intel_uncore_read(uncore, GEN7_ERR_INT);
-
if (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 55)) {
gt->fault_data0 = intel_gt_mcr_read_any((struct intel_gt *)gt->_gt,
XEHP_FAULT_TLB_DATA0);
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.h b/drivers/gpu/drm/i915/i915_gpu_error.h
index 91b3df621a49..26970c5e291e 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.h
+++ b/drivers/gpu/drm/i915/i915_gpu_error.h
@@ -149,7 +149,6 @@ struct intel_gt_coredump {
u32 gtier[6], ngtier;
u32 forcewake;
u32 error; /* gen6+ */
- u32 err_int; /* gen7 */
u32 fault_data0; /* gen8, gen9 */
u32 fault_data1; /* gen8, gen9 */
u32 done_reg;
diff --git a/drivers/gpu/drm/i915/i915_hwmon.c b/drivers/gpu/drm/i915/i915_hwmon.c
index 16e64d752e12..c4a799f5fe92 100644
--- a/drivers/gpu/drm/i915/i915_hwmon.c
+++ b/drivers/gpu/drm/i915/i915_hwmon.c
@@ -9,6 +9,8 @@
#include <linux/types.h>
#include <linux/units.h>
+#include <drm/intel/intel_pcode_regs.h>
+
#include "i915_drv.h"
#include "i915_hwmon.h"
#include "i915_reg.h"
diff --git a/drivers/gpu/drm/i915/i915_initial_plane.c b/drivers/gpu/drm/i915/i915_initial_plane.c
index 7fb52d81f7b6..5594548f51d8 100644
--- a/drivers/gpu/drm/i915/i915_initial_plane.c
+++ b/drivers/gpu/drm/i915/i915_initial_plane.c
@@ -9,6 +9,7 @@
#include "display/intel_crtc.h"
#include "display/intel_display_types.h"
#include "display/intel_fb.h"
+#include "display/intel_fbdev_fb.h"
#include "gem/i915_gem_lmem.h"
#include "gem/i915_gem_region.h"
@@ -116,7 +117,7 @@ initial_plane_vma(struct drm_i915_private *i915,
*/
if (IS_ENABLED(CONFIG_FRAMEBUFFER_CONSOLE) &&
mem == i915->mm.stolen_region &&
- size * 2 > i915->dsm.usable_size) {
+ !intel_fbdev_fb_prefer_stolen(&i915->drm, size)) {
drm_dbg_kms(&i915->drm, "Initial FB size exceeds half of stolen, discarding\n");
return NULL;
}
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 3fe978d4ea53..d4d8dd0a4174 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -34,6 +34,7 @@
#include <drm/drm_drv.h>
#include <drm/drm_print.h>
#include <drm/intel/display_parent_interface.h>
+#include <drm/intel/intel_gmd_interrupt_regs.h>
#include "display/intel_display_irq.h"
#include "display/intel_hotplug.h"
diff --git a/drivers/gpu/drm/i915/i915_overlay.c b/drivers/gpu/drm/i915/i915_overlay.c
new file mode 100644
index 000000000000..c2d712bd2b0d
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_overlay.c
@@ -0,0 +1,517 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright 2026, Intel Corporation.
+ */
+
+#include <drm/drm_print.h>
+
+#include <drm/intel/display_parent_interface.h>
+#include <drm/intel/intel_gmd_interrupt_regs.h>
+
+#include "gem/i915_gem_internal.h"
+#include "gem/i915_gem_object_frontbuffer.h"
+#include "gem/i915_gem_pm.h"
+
+#include "gt/intel_gpu_commands.h"
+#include "gt/intel_ring.h"
+
+#include "i915_drv.h"
+#include "i915_overlay.h"
+#include "i915_reg.h"
+#include "intel_pci_config.h"
+
+#include "display/intel_frontbuffer.h"
+
+/* overlay flip addr flag */
+#define OFC_UPDATE 0x1
+
+struct i915_overlay {
+ struct drm_i915_private *i915;
+ struct intel_context *context;
+ struct i915_vma *vma;
+ struct i915_vma *old_vma;
+ struct i915_frontbuffer *frontbuffer;
+ /* register access */
+ struct drm_i915_gem_object *reg_bo;
+ void __iomem *regs;
+ u32 flip_addr;
+ u32 frontbuffer_bits;
+ /* flip handling */
+ struct i915_active last_flip;
+ void (*flip_complete)(struct i915_overlay *overlay);
+};
+
+static void i830_overlay_clock_gating(struct drm_i915_private *i915,
+ bool enable)
+{
+ struct pci_dev *pdev = to_pci_dev(i915->drm.dev);
+ u8 val;
+
+ /*
+ * WA_OVERLAY_CLKGATE:alm
+ *
+ * FIXME should perhaps be done on the display side?
+ */
+ if (enable)
+ intel_uncore_write(&i915->uncore, DSPCLK_GATE_D, 0);
+ else
+ intel_uncore_write(&i915->uncore, DSPCLK_GATE_D, OVRUNIT_CLOCK_GATE_DISABLE);
+
+ /* WA_DISABLE_L2CACHE_CLOCK_GATING:alm */
+ pci_bus_read_config_byte(pdev->bus,
+ PCI_DEVFN(0, 0), I830_CLOCK_GATE, &val);
+ if (enable)
+ val &= ~I830_L2_CACHE_CLOCK_GATE_DISABLE;
+ else
+ val |= I830_L2_CACHE_CLOCK_GATE_DISABLE;
+ pci_bus_write_config_byte(pdev->bus,
+ PCI_DEVFN(0, 0), I830_CLOCK_GATE, val);
+}
+
+static struct i915_request *
+alloc_request(struct i915_overlay *overlay, void (*fn)(struct i915_overlay *))
+{
+ struct i915_request *rq;
+ int err;
+
+ overlay->flip_complete = fn;
+
+ rq = i915_request_create(overlay->context);
+ if (IS_ERR(rq))
+ return rq;
+
+ err = i915_active_add_request(&overlay->last_flip, rq);
+ if (err) {
+ i915_request_add(rq);
+ return ERR_PTR(err);
+ }
+
+ return rq;
+}
+
+static bool i915_overlay_is_active(struct drm_device *drm)
+{
+ struct drm_i915_private *i915 = to_i915(drm);
+ struct i915_overlay *overlay = i915->overlay;
+
+ return overlay->frontbuffer_bits;
+}
+
+/* overlay needs to be disabled in OCMD reg */
+static int i915_overlay_on(struct drm_device *drm,
+ u32 frontbuffer_bits)
+{
+ struct drm_i915_private *i915 = to_i915(drm);
+ struct i915_overlay *overlay = i915->overlay;
+ struct i915_request *rq;
+ u32 *cs;
+
+ drm_WARN_ON(drm, i915_overlay_is_active(drm));
+
+ rq = alloc_request(overlay, NULL);
+ if (IS_ERR(rq))
+ return PTR_ERR(rq);
+
+ cs = intel_ring_begin(rq, 4);
+ if (IS_ERR(cs)) {
+ i915_request_add(rq);
+ return PTR_ERR(cs);
+ }
+
+ overlay->frontbuffer_bits = frontbuffer_bits;
+
+ if (IS_I830(i915))
+ i830_overlay_clock_gating(i915, false);
+
+ *cs++ = MI_OVERLAY_FLIP | MI_OVERLAY_ON;
+ *cs++ = overlay->flip_addr | OFC_UPDATE;
+ *cs++ = MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP;
+ *cs++ = MI_NOOP;
+ intel_ring_advance(rq, cs);
+
+ i915_request_add(rq);
+
+ return i915_active_wait(&overlay->last_flip);
+}
+
+static void i915_overlay_flip_prepare(struct i915_overlay *overlay,
+ struct i915_vma *vma)
+{
+ struct drm_i915_private *i915 = overlay->i915;
+ struct i915_frontbuffer *frontbuffer = NULL;
+
+ drm_WARN_ON(&i915->drm, overlay->old_vma);
+
+ if (vma)
+ frontbuffer = i915_gem_object_frontbuffer_get(vma->obj);
+
+ i915_gem_object_frontbuffer_track(overlay->frontbuffer, frontbuffer,
+ overlay->frontbuffer_bits);
+
+ if (overlay->frontbuffer)
+ i915_gem_object_frontbuffer_put(overlay->frontbuffer);
+ overlay->frontbuffer = frontbuffer;
+
+ overlay->old_vma = overlay->vma;
+ if (vma)
+ overlay->vma = i915_vma_get(vma);
+ else
+ overlay->vma = NULL;
+}
+
+/* overlay needs to be enabled in OCMD reg */
+static int i915_overlay_continue(struct drm_device *drm,
+ struct i915_vma *vma,
+ bool load_polyphase_filter)
+{
+ struct drm_i915_private *i915 = to_i915(drm);
+ struct i915_overlay *overlay = i915->overlay;
+ struct i915_request *rq;
+ u32 flip_addr = overlay->flip_addr;
+ u32 *cs;
+
+ drm_WARN_ON(drm, !i915_overlay_is_active(drm));
+
+ if (load_polyphase_filter)
+ flip_addr |= OFC_UPDATE;
+
+ rq = alloc_request(overlay, NULL);
+ if (IS_ERR(rq))
+ return PTR_ERR(rq);
+
+ cs = intel_ring_begin(rq, 2);
+ if (IS_ERR(cs)) {
+ i915_request_add(rq);
+ return PTR_ERR(cs);
+ }
+
+ *cs++ = MI_OVERLAY_FLIP | MI_OVERLAY_CONTINUE;
+ *cs++ = flip_addr;
+ intel_ring_advance(rq, cs);
+
+ i915_overlay_flip_prepare(overlay, vma);
+ i915_request_add(rq);
+
+ return 0;
+}
+
+static void i915_overlay_release_old_vma(struct i915_overlay *overlay)
+{
+ struct drm_i915_private *i915 = overlay->i915;
+ struct intel_display *display = i915->display;
+ struct i915_vma *vma;
+
+ vma = fetch_and_zero(&overlay->old_vma);
+ if (drm_WARN_ON(&i915->drm, !vma))
+ return;
+
+ intel_frontbuffer_flip(display, overlay->frontbuffer_bits);
+
+ i915_vma_unpin(vma);
+ i915_vma_put(vma);
+}
+
+static void
+i915_overlay_release_old_vid_tail(struct i915_overlay *overlay)
+{
+ i915_overlay_release_old_vma(overlay);
+}
+
+static void i915_overlay_off_tail(struct i915_overlay *overlay)
+{
+ struct drm_i915_private *i915 = overlay->i915;
+
+ i915_overlay_release_old_vma(overlay);
+
+ overlay->frontbuffer_bits = 0;
+
+ if (IS_I830(i915))
+ i830_overlay_clock_gating(i915, true);
+}
+
+static void i915_overlay_last_flip_retire(struct i915_active *active)
+{
+ struct i915_overlay *overlay =
+ container_of(active, typeof(*overlay), last_flip);
+
+ if (overlay->flip_complete)
+ overlay->flip_complete(overlay);
+}
+
+/* overlay needs to be disabled in OCMD reg */
+static int i915_overlay_off(struct drm_device *drm)
+{
+ struct drm_i915_private *i915 = to_i915(drm);
+ struct i915_overlay *overlay = i915->overlay;
+ struct i915_request *rq;
+ u32 *cs, flip_addr = overlay->flip_addr;
+
+ drm_WARN_ON(drm, !i915_overlay_is_active(drm));
+
+ /*
+ * According to intel docs the overlay hw may hang (when switching
+ * off) without loading the filter coeffs. It is however unclear whether
+ * this applies to the disabling of the overlay or to the switching off
+ * of the hw. Do it in both cases.
+ */
+ flip_addr |= OFC_UPDATE;
+
+ rq = alloc_request(overlay, i915_overlay_off_tail);
+ if (IS_ERR(rq))
+ return PTR_ERR(rq);
+
+ cs = intel_ring_begin(rq, 6);
+ if (IS_ERR(cs)) {
+ i915_request_add(rq);
+ return PTR_ERR(cs);
+ }
+
+ /* wait for overlay to go idle */
+ *cs++ = MI_OVERLAY_FLIP | MI_OVERLAY_CONTINUE;
+ *cs++ = flip_addr;
+ *cs++ = MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP;
+
+ /* turn overlay off */
+ *cs++ = MI_OVERLAY_FLIP | MI_OVERLAY_OFF;
+ *cs++ = flip_addr;
+ *cs++ = MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP;
+
+ intel_ring_advance(rq, cs);
+
+ i915_overlay_flip_prepare(overlay, NULL);
+ i915_request_add(rq);
+
+ return i915_active_wait(&overlay->last_flip);
+}
+
+/*
+ * Recover from an interruption due to a signal.
+ * We have to be careful not to repeat work forever and make forward progress.
+ */
+static int i915_overlay_recover_from_interrupt(struct drm_device *drm)
+{
+ struct drm_i915_private *i915 = to_i915(drm);
+ struct i915_overlay *overlay = i915->overlay;
+
+ return i915_active_wait(&overlay->last_flip);
+}
+
+/*
+ * Wait for pending overlay flip and release old frame.
+ * Needs to be called before the overlay registers are changed
+ * via intel_overlay_(un)map_regs.
+ */
+static int i915_overlay_release_old_vid(struct drm_device *drm)
+{
+ struct drm_i915_private *i915 = to_i915(drm);
+ struct i915_overlay *overlay = i915->overlay;
+ struct i915_request *rq;
+ u32 *cs;
+
+ /*
+ * Only wait if there is actually an old frame to release to
+ * guarantee forward progress.
+ */
+ if (!overlay->old_vma)
+ return 0;
+
+ if (!(intel_uncore_read(&i915->uncore, GEN2_ISR) & I915_OVERLAY_PLANE_FLIP_PENDING_INTERRUPT)) {
+ i915_overlay_release_old_vid_tail(overlay);
+ return 0;
+ }
+
+ rq = alloc_request(overlay, i915_overlay_release_old_vid_tail);
+ if (IS_ERR(rq))
+ return PTR_ERR(rq);
+
+ cs = intel_ring_begin(rq, 2);
+ if (IS_ERR(cs)) {
+ i915_request_add(rq);
+ return PTR_ERR(cs);
+ }
+
+ *cs++ = MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP;
+ *cs++ = MI_NOOP;
+ intel_ring_advance(rq, cs);
+
+ i915_request_add(rq);
+
+ return i915_active_wait(&overlay->last_flip);
+}
+
+static void i915_overlay_reset(struct drm_device *drm)
+{
+ struct drm_i915_private *i915 = to_i915(drm);
+ struct i915_overlay *overlay = i915->overlay;
+
+ if (!overlay)
+ return;
+
+ overlay->frontbuffer_bits = 0;
+}
+
+static struct i915_vma *i915_overlay_pin_fb(struct drm_device *drm,
+ struct drm_gem_object *obj,
+ u32 *offset)
+{
+ struct drm_i915_gem_object *new_bo = to_intel_bo(obj);
+ struct i915_gem_ww_ctx ww;
+ struct i915_vma *vma;
+ int ret;
+
+ i915_gem_ww_ctx_init(&ww, true);
+retry:
+ ret = i915_gem_object_lock(new_bo, &ww);
+ if (!ret) {
+ vma = i915_gem_object_pin_to_display_plane(new_bo, &ww, 0, 0,
+ NULL, PIN_MAPPABLE);
+ ret = PTR_ERR_OR_ZERO(vma);
+ }
+ if (ret == -EDEADLK) {
+ ret = i915_gem_ww_ctx_backoff(&ww);
+ if (!ret)
+ goto retry;
+ }
+ i915_gem_ww_ctx_fini(&ww);
+ if (ret)
+ return ERR_PTR(ret);
+
+ *offset = i915_ggtt_offset(vma);
+
+ return vma;
+}
+
+static void i915_overlay_unpin_fb(struct drm_device *drm,
+ struct i915_vma *vma)
+{
+ i915_vma_unpin(vma);
+}
+
+static struct drm_gem_object *
+i915_overlay_obj_lookup(struct drm_device *drm,
+ struct drm_file *file_priv,
+ u32 handle)
+{
+ struct drm_i915_gem_object *bo;
+
+ bo = i915_gem_object_lookup(file_priv, handle);
+ if (!bo)
+ return ERR_PTR(-ENOENT);
+
+ if (i915_gem_object_is_tiled(bo)) {
+ drm_dbg(drm, "buffer used for overlay image can not be tiled\n");
+ i915_gem_object_put(bo);
+ return ERR_PTR(-EINVAL);
+ }
+
+ return intel_bo_to_drm_bo(bo);
+}
+
+static int get_registers(struct i915_overlay *overlay, bool use_phys)
+{
+ struct drm_i915_private *i915 = overlay->i915;
+ struct drm_i915_gem_object *obj;
+ struct i915_vma *vma;
+ int err;
+
+ obj = i915_gem_object_create_stolen(i915, PAGE_SIZE);
+ if (IS_ERR(obj))
+ obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
+ if (IS_ERR(obj))
+ return PTR_ERR(obj);
+
+ vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, PIN_MAPPABLE);
+ if (IS_ERR(vma)) {
+ err = PTR_ERR(vma);
+ goto err_put_bo;
+ }
+
+ if (use_phys)
+ overlay->flip_addr = sg_dma_address(obj->mm.pages->sgl);
+ else
+ overlay->flip_addr = i915_ggtt_offset(vma);
+ overlay->regs = i915_vma_pin_iomap(vma);
+ i915_vma_unpin(vma);
+
+ if (IS_ERR(overlay->regs)) {
+ err = PTR_ERR(overlay->regs);
+ goto err_put_bo;
+ }
+
+ overlay->reg_bo = obj;
+ return 0;
+
+err_put_bo:
+ i915_gem_object_put(obj);
+ return err;
+}
+
+static void __iomem *i915_overlay_setup(struct drm_device *drm,
+ bool needs_physical)
+{
+ struct drm_i915_private *i915 = to_i915(drm);
+ struct intel_engine_cs *engine;
+ struct i915_overlay *overlay;
+ int ret;
+
+ engine = to_gt(i915)->engine[RCS0];
+ if (!engine || !engine->kernel_context)
+ return ERR_PTR(-ENOENT);
+
+ overlay = kzalloc_obj(*overlay);
+ if (!overlay)
+ return ERR_PTR(-ENOMEM);
+
+ overlay->i915 = i915;
+ overlay->context = engine->kernel_context;
+
+ i915_active_init(&overlay->last_flip,
+ NULL, i915_overlay_last_flip_retire, 0);
+
+ ret = get_registers(overlay, needs_physical);
+ if (ret) {
+ kfree(overlay);
+ return ERR_PTR(ret);
+ }
+
+ i915->overlay = overlay;
+
+ return overlay->regs;
+}
+
+static void i915_overlay_cleanup(struct drm_device *drm)
+{
+ struct drm_i915_private *i915 = to_i915(drm);
+ struct i915_overlay *overlay = i915->overlay;
+
+ if (!overlay)
+ return;
+
+ /*
+ * The BOs should be freed by the generic code already.
+ * Furthermore modesetting teardown happens beforehand so the
+ * hardware should be off already.
+ */
+ drm_WARN_ON(drm, i915_overlay_is_active(drm));
+
+ i915_gem_object_put(overlay->reg_bo);
+ i915_active_fini(&overlay->last_flip);
+
+ kfree(overlay);
+ i915->overlay = NULL;
+}
+
+const struct intel_display_overlay_interface i915_display_overlay_interface = {
+ .is_active = i915_overlay_is_active,
+ .overlay_on = i915_overlay_on,
+ .overlay_continue = i915_overlay_continue,
+ .overlay_off = i915_overlay_off,
+ .recover_from_interrupt = i915_overlay_recover_from_interrupt,
+ .release_old_vid = i915_overlay_release_old_vid,
+ .reset = i915_overlay_reset,
+ .obj_lookup = i915_overlay_obj_lookup,
+ .pin_fb = i915_overlay_pin_fb,
+ .unpin_fb = i915_overlay_unpin_fb,
+ .setup = i915_overlay_setup,
+ .cleanup = i915_overlay_cleanup,
+};
diff --git a/drivers/gpu/drm/i915/i915_overlay.h b/drivers/gpu/drm/i915/i915_overlay.h
new file mode 100644
index 000000000000..f8053eb8d189
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_overlay.h
@@ -0,0 +1,11 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2026 Intel Corporation
+ */
+
+#ifndef __I915_OVERLAY_H__
+#define __I915_OVERLAY_H__
+
+extern const struct intel_display_overlay_interface i915_display_overlay_interface;
+
+#endif /* __I915_OVERLAY_H__ */
diff --git a/drivers/gpu/drm/i915/i915_perf.c b/drivers/gpu/drm/i915/i915_perf.c
index 2820e8f0f765..19b82427aa41 100644
--- a/drivers/gpu/drm/i915/i915_perf.c
+++ b/drivers/gpu/drm/i915/i915_perf.c
@@ -2635,10 +2635,9 @@ static int gen12_configure_oar_context(struct i915_perf_stream *stream,
{
RING_CONTEXT_CONTROL(ce->engine->mmio_base),
CTX_CONTEXT_CONTROL,
- _MASKED_FIELD(GEN12_CTX_CTRL_OAR_CONTEXT_ENABLE,
- active ?
- GEN12_CTX_CTRL_OAR_CONTEXT_ENABLE :
- 0)
+ active ?
+ REG_MASKED_FIELD_ENABLE(GEN12_CTX_CTRL_OAR_CONTEXT_ENABLE) :
+ REG_MASKED_FIELD_DISABLE(GEN12_CTX_CTRL_OAR_CONTEXT_ENABLE),
},
};
@@ -2827,8 +2826,8 @@ gen8_enable_metric_set(struct i915_perf_stream *stream,
*/
if (IS_GRAPHICS_VER(stream->perf->i915, 9, 11)) {
intel_uncore_write(uncore, GEN8_OA_DEBUG,
- _MASKED_BIT_ENABLE(GEN9_OA_DEBUG_DISABLE_CLK_RATIO_REPORTS |
- GEN9_OA_DEBUG_INCLUDE_CLK_RATIO));
+ REG_MASKED_FIELD_ENABLE(GEN9_OA_DEBUG_DISABLE_CLK_RATIO_REPORTS |
+ GEN9_OA_DEBUG_INCLUDE_CLK_RATIO));
}
/*
@@ -2847,9 +2846,10 @@ gen8_enable_metric_set(struct i915_perf_stream *stream,
static u32 oag_report_ctx_switches(const struct i915_perf_stream *stream)
{
- return _MASKED_FIELD(GEN12_OAG_OA_DEBUG_DISABLE_CTX_SWITCH_REPORTS,
- (stream->sample_flags & SAMPLE_OA_REPORT) ?
- 0 : GEN12_OAG_OA_DEBUG_DISABLE_CTX_SWITCH_REPORTS);
+ if (stream->sample_flags & SAMPLE_OA_REPORT)
+ return REG_MASKED_FIELD_DISABLE(GEN12_OAG_OA_DEBUG_DISABLE_CTX_SWITCH_REPORTS);
+ else
+ return REG_MASKED_FIELD_ENABLE(GEN12_OAG_OA_DEBUG_DISABLE_CTX_SWITCH_REPORTS);
}
static int
@@ -2870,15 +2870,15 @@ gen12_enable_metric_set(struct i915_perf_stream *stream,
*/
if (IS_DG2(i915)) {
intel_gt_mcr_multicast_write(uncore->gt, GEN8_ROW_CHICKEN,
- _MASKED_BIT_ENABLE(STALL_DOP_GATING_DISABLE));
+ REG_MASKED_FIELD_ENABLE(STALL_DOP_GATING_DISABLE));
intel_uncore_write(uncore, GEN7_ROW_CHICKEN2,
- _MASKED_BIT_ENABLE(GEN12_DISABLE_DOP_GATING));
+ REG_MASKED_FIELD_ENABLE(GEN12_DISABLE_DOP_GATING));
}
intel_uncore_write(uncore, __oa_regs(stream)->oa_debug,
/* Disable clk ratio reports, like previous Gens. */
- _MASKED_BIT_ENABLE(GEN12_OAG_OA_DEBUG_DISABLE_CLK_RATIO_REPORTS |
- GEN12_OAG_OA_DEBUG_INCLUDE_CLK_RATIO) |
+ REG_MASKED_FIELD_ENABLE(GEN12_OAG_OA_DEBUG_DISABLE_CLK_RATIO_REPORTS |
+ GEN12_OAG_OA_DEBUG_INCLUDE_CLK_RATIO) |
/*
* If the user didn't require OA reports, instruct
* the hardware not to emit ctx switch reports.
@@ -2949,9 +2949,9 @@ static void gen12_disable_metric_set(struct i915_perf_stream *stream)
*/
if (IS_DG2(i915)) {
intel_gt_mcr_multicast_write(uncore->gt, GEN8_ROW_CHICKEN,
- _MASKED_BIT_DISABLE(STALL_DOP_GATING_DISABLE));
+ REG_MASKED_FIELD_DISABLE(STALL_DOP_GATING_DISABLE));
intel_uncore_write(uncore, GEN7_ROW_CHICKEN2,
- _MASKED_BIT_DISABLE(GEN12_DISABLE_DOP_GATING));
+ REG_MASKED_FIELD_DISABLE(GEN12_DISABLE_DOP_GATING));
}
/* disable the context save/restore or OAR counters */
@@ -4475,7 +4475,7 @@ static u32 mask_reg_value(u32 reg, u32 val)
* programmed by userspace doesn't change this.
*/
if (REG_EQUAL(reg, HALF_SLICE_CHICKEN2))
- val = val & ~_MASKED_BIT_ENABLE(GEN8_ST_PO_DISABLE);
+ val = val & ~REG_MASKED_FIELD_ENABLE(GEN8_ST_PO_DISABLE);
/*
* WAIT_FOR_RC6_EXIT has only one bit fulfilling the function
@@ -4483,7 +4483,7 @@ static u32 mask_reg_value(u32 reg, u32 val)
* configs.
*/
if (REG_EQUAL(reg, WAIT_FOR_RC6_EXIT))
- val = val & ~_MASKED_BIT_ENABLE(HSW_WAIT_FOR_RC6_EXIT_ENABLE);
+ val = val & ~REG_MASKED_FIELD_ENABLE(HSW_WAIT_FOR_RC6_EXIT_ENABLE);
return val;
}
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 5bf3b4ab2baa..5d99b99b0c57 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -116,9 +116,6 @@
* #define GEN8_BAR _MMIO(0xb888)
*/
-#define GU_CNTL_PROTECTED _MMIO(0x10100C)
-#define DEPRESENT REG_BIT(9)
-
#define GU_CNTL _MMIO(0x101010)
#define LMEM_INIT REG_BIT(7)
#define DRIVERFLR REG_BIT(31)
@@ -328,29 +325,6 @@
#define GEN7_MEDIA_MAX_REQ_COUNT _MMIO(0x4070)
#define GEN7_GFX_MAX_REQ_COUNT _MMIO(0x4074)
-#define GEN7_ERR_INT _MMIO(0x44040)
-#define ERR_INT_POISON (1 << 31)
-#define ERR_INT_INVALID_GTT_PTE (1 << 29)
-#define ERR_INT_INVALID_PTE_DATA (1 << 28)
-#define ERR_INT_SPRITE_C_FAULT (1 << 23)
-#define ERR_INT_PRIMARY_C_FAULT (1 << 22)
-#define ERR_INT_CURSOR_C_FAULT (1 << 21)
-#define ERR_INT_SPRITE_B_FAULT (1 << 20)
-#define ERR_INT_PRIMARY_B_FAULT (1 << 19)
-#define ERR_INT_CURSOR_B_FAULT (1 << 18)
-#define ERR_INT_SPRITE_A_FAULT (1 << 17)
-#define ERR_INT_PRIMARY_A_FAULT (1 << 16)
-#define ERR_INT_CURSOR_A_FAULT (1 << 15)
-#define ERR_INT_MMIO_UNCLAIMED (1 << 13)
-#define ERR_INT_PIPE_CRC_DONE_C (1 << 8)
-#define ERR_INT_FIFO_UNDERRUN_C (1 << 6)
-#define ERR_INT_PIPE_CRC_DONE_B (1 << 5)
-#define ERR_INT_FIFO_UNDERRUN_B (1 << 3)
-#define ERR_INT_PIPE_CRC_DONE_A (1 << 2)
-#define ERR_INT_PIPE_CRC_DONE(pipe) (1 << (2 + (pipe) * 3))
-#define ERR_INT_FIFO_UNDERRUN_A (1 << 0)
-#define ERR_INT_FIFO_UNDERRUN(pipe) (1 << ((pipe) * 3))
-
#define FPGA_DBG _MMIO(0x42300)
#define FPGA_DBG_RM_NOCLAIM REG_BIT(31)
@@ -361,9 +335,6 @@
#define VLV_GU_CTL0 _MMIO(VLV_DISPLAY_BASE + 0x2030)
#define VLV_GU_CTL1 _MMIO(VLV_DISPLAY_BASE + 0x2034)
-#define SCPD0 _MMIO(0x209c) /* 915+ only */
-#define SCPD_FBC_IGNORE_3D (1 << 6)
-#define CSTATE_RENDER_CLOCK_GATE_DISABLE (1 << 5)
#define GEN2_IER _MMIO(0x20a0)
#define GEN2_IIR _MMIO(0x20a4)
#define GEN2_IMR _MMIO(0x20a8)
@@ -377,13 +348,6 @@
#define GINT_DIS (1 << 22)
#define GCFG_DIS (1 << 8)
#define VLV_GUNIT_CLOCK_GATE2 _MMIO(VLV_DISPLAY_BASE + 0x2064)
-#define VLV_IIR_RW _MMIO(VLV_DISPLAY_BASE + 0x2084)
-#define VLV_IER _MMIO(VLV_DISPLAY_BASE + 0x20a0)
-#define VLV_IIR _MMIO(VLV_DISPLAY_BASE + 0x20a4)
-#define VLV_IMR _MMIO(VLV_DISPLAY_BASE + 0x20a8)
-#define VLV_ISR _MMIO(VLV_DISPLAY_BASE + 0x20ac)
-#define VLV_PCBR _MMIO(VLV_DISPLAY_BASE + 0x2120)
-#define VLV_PCBR_ADDR_SHIFT 12
#define EIR _MMIO(0x20b0)
#define EMR _MMIO(0x20b4)
@@ -397,24 +361,10 @@
#define GEN2_ERROR_REGS I915_ERROR_REGS(EMR, EIR)
-#define INSTPM _MMIO(0x20c0)
-#define INSTPM_SELF_EN (1 << 12) /* 915GM only */
-#define INSTPM_AGPBUSY_INT_EN (1 << 11) /* gen3: when disabled, pending interrupts
- will not assert AGPBUSY# and will only
- be delivered when out of C3. */
-#define INSTPM_FORCE_ORDERING (1 << 7) /* GEN6+ */
-#define INSTPM_TLB_INVALIDATE (1 << 9)
-#define INSTPM_SYNC_FLUSH (1 << 5)
#define MEM_MODE _MMIO(0x20cc)
#define MEM_DISPLAY_B_TRICKLE_FEED_DISABLE (1 << 3) /* 830 only */
#define MEM_DISPLAY_A_TRICKLE_FEED_DISABLE (1 << 2) /* 830/845 only */
#define MEM_DISPLAY_TRICKLE_FEED_DISABLE (1 << 2) /* 85x only */
-#define FW_BLC _MMIO(0x20d8)
-#define FW_BLC2 _MMIO(0x20dc)
-#define FW_BLC_SELF _MMIO(0x20e0) /* 915+ only */
-#define FW_BLC_SELF_EN_MASK REG_BIT(31)
-#define FW_BLC_SELF_FIFO_MASK REG_BIT(16) /* 945 only */
-#define FW_BLC_SELF_EN REG_BIT(15) /* 945 only */
#define MM_BURST_LENGTH 0x00700000
#define MM_FIFO_WATERMARK 0x0001F000
#define LM_BURST_LENGTH 0x00000700
@@ -524,42 +474,6 @@
/* These are all the "old" interrupts */
#define ILK_BSD_USER_INTERRUPT (1 << 5)
-#define I915_PM_INTERRUPT (1 << 31)
-#define I915_ISP_INTERRUPT (1 << 22)
-#define I915_LPE_PIPE_B_INTERRUPT (1 << 21)
-#define I915_LPE_PIPE_A_INTERRUPT (1 << 20)
-#define I915_MIPIC_INTERRUPT (1 << 19)
-#define I915_MIPIA_INTERRUPT (1 << 18)
-#define I915_PIPE_CONTROL_NOTIFY_INTERRUPT (1 << 18)
-#define I915_DISPLAY_PORT_INTERRUPT (1 << 17)
-#define I915_DISPLAY_PIPE_C_HBLANK_INTERRUPT (1 << 16)
-#define I915_MASTER_ERROR_INTERRUPT (1 << 15)
-#define I915_DISPLAY_PIPE_B_HBLANK_INTERRUPT (1 << 14)
-#define I915_GMCH_THERMAL_SENSOR_EVENT_INTERRUPT (1 << 14) /* p-state */
-#define I915_DISPLAY_PIPE_A_HBLANK_INTERRUPT (1 << 13)
-#define I915_HWB_OOM_INTERRUPT (1 << 13)
-#define I915_LPE_PIPE_C_INTERRUPT (1 << 12)
-#define I915_SYNC_STATUS_INTERRUPT (1 << 12)
-#define I915_MISC_INTERRUPT (1 << 11)
-#define I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT (1 << 11)
-#define I915_DISPLAY_PIPE_C_VBLANK_INTERRUPT (1 << 10)
-#define I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT (1 << 10)
-#define I915_DISPLAY_PIPE_C_EVENT_INTERRUPT (1 << 9)
-#define I915_OVERLAY_PLANE_FLIP_PENDING_INTERRUPT (1 << 9)
-#define I915_DISPLAY_PIPE_C_DPBM_INTERRUPT (1 << 8)
-#define I915_DISPLAY_PLANE_C_FLIP_PENDING_INTERRUPT (1 << 8)
-#define I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT (1 << 7)
-#define I915_DISPLAY_PIPE_A_EVENT_INTERRUPT (1 << 6)
-#define I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT (1 << 5)
-#define I915_DISPLAY_PIPE_B_EVENT_INTERRUPT (1 << 4)
-#define I915_DISPLAY_PIPE_A_DPBM_INTERRUPT (1 << 3)
-#define I915_DISPLAY_PIPE_B_DPBM_INTERRUPT (1 << 2)
-#define I915_DEBUG_INTERRUPT (1 << 2)
-#define I915_WINVALID_INTERRUPT (1 << 1)
-#define I915_USER_INTERRUPT (1 << 1)
-#define I915_ASLE_INTERRUPT (1 << 0)
-#define I915_BSD_USER_INTERRUPT (1 << 25)
-
#define GEN6_BSD_RNCID _MMIO(0x12198)
#define GEN7_FF_THREAD_MODE _MMIO(0x20a0)
@@ -613,47 +527,6 @@
#define DSTATE_GFX_CLOCK_GATING (1 << 1)
#define DSTATE_DOT_CLOCK_GATING (1 << 0)
-#define DSPCLK_GATE_D _MMIO(0x6200)
-#define VLV_DSPCLK_GATE_D _MMIO(VLV_DISPLAY_BASE + 0x6200)
-# define DPUNIT_B_CLOCK_GATE_DISABLE (1 << 30) /* 965 */
-# define VSUNIT_CLOCK_GATE_DISABLE (1 << 29) /* 965 */
-# define VRHUNIT_CLOCK_GATE_DISABLE (1 << 28) /* 965 */
-# define VRDUNIT_CLOCK_GATE_DISABLE (1 << 27) /* 965 */
-# define AUDUNIT_CLOCK_GATE_DISABLE (1 << 26) /* 965 */
-# define DPUNIT_A_CLOCK_GATE_DISABLE (1 << 25) /* 965 */
-# define DPCUNIT_CLOCK_GATE_DISABLE (1 << 24) /* 965 */
-# define PNV_GMBUSUNIT_CLOCK_GATE_DISABLE (1 << 24) /* pnv */
-# define TVRUNIT_CLOCK_GATE_DISABLE (1 << 23) /* 915-945 */
-# define TVCUNIT_CLOCK_GATE_DISABLE (1 << 22) /* 915-945 */
-# define TVFUNIT_CLOCK_GATE_DISABLE (1 << 21) /* 915-945 */
-# define TVEUNIT_CLOCK_GATE_DISABLE (1 << 20) /* 915-945 */
-# define DVSUNIT_CLOCK_GATE_DISABLE (1 << 19) /* 915-945 */
-# define DSSUNIT_CLOCK_GATE_DISABLE (1 << 18) /* 915-945 */
-# define DDBUNIT_CLOCK_GATE_DISABLE (1 << 17) /* 915-945 */
-# define DPRUNIT_CLOCK_GATE_DISABLE (1 << 16) /* 915-945 */
-# define DPFUNIT_CLOCK_GATE_DISABLE (1 << 15) /* 915-945 */
-# define DPBMUNIT_CLOCK_GATE_DISABLE (1 << 14) /* 915-945 */
-# define DPLSUNIT_CLOCK_GATE_DISABLE (1 << 13) /* 915-945 */
-# define DPLUNIT_CLOCK_GATE_DISABLE (1 << 12) /* 915-945 */
-# define DPOUNIT_CLOCK_GATE_DISABLE (1 << 11)
-# define DPBUNIT_CLOCK_GATE_DISABLE (1 << 10)
-# define DCUNIT_CLOCK_GATE_DISABLE (1 << 9)
-# define DPUNIT_CLOCK_GATE_DISABLE (1 << 8)
-# define VRUNIT_CLOCK_GATE_DISABLE (1 << 7) /* 915+: reserved */
-# define OVHUNIT_CLOCK_GATE_DISABLE (1 << 6) /* 830-865 */
-# define DPIOUNIT_CLOCK_GATE_DISABLE (1 << 6) /* 915-945 */
-# define OVFUNIT_CLOCK_GATE_DISABLE (1 << 5)
-# define OVBUNIT_CLOCK_GATE_DISABLE (1 << 4)
-/*
- * This bit must be set on the 830 to prevent hangs when turning off the
- * overlay scaler.
- */
-# define OVRUNIT_CLOCK_GATE_DISABLE (1 << 3)
-# define OVCUNIT_CLOCK_GATE_DISABLE (1 << 2)
-# define OVUUNIT_CLOCK_GATE_DISABLE (1 << 1)
-# define ZVUNIT_CLOCK_GATE_DISABLE (1 << 0) /* 830 */
-# define OVLUNIT_CLOCK_GATE_DISABLE (1 << 0) /* 845,865 */
-
#define RENCLK_GATE_D1 _MMIO(0x6204)
# define BLITTER_CLOCK_GATE_DISABLE (1 << 13) /* 945GM only */
# define MPEG_CLOCK_GATE_DISABLE (1 << 12) /* 945GM only */
@@ -758,19 +631,6 @@
#define VLV_CLK_CTL2 _MMIO(0x101104)
#define CLK_CTL2_CZCOUNT_30NS_SHIFT 28
-/*
- * GEN9 clock gating regs
- */
-#define GEN9_CLKGATE_DIS_0 _MMIO(0x46530)
-#define DARBF_GATING_DIS REG_BIT(27)
-#define MTL_PIPEDMC_GATING_DIS(pipe) REG_BIT(15 - (pipe))
-#define PWM2_GATING_DIS REG_BIT(14)
-#define PWM1_GATING_DIS REG_BIT(13)
-
-#define GEN9_CLKGATE_DIS_3 _MMIO(0x46538)
-#define TGL_VRH_GATING_DIS REG_BIT(31)
-#define DPT_GATING_DIS REG_BIT(22)
-
#define VLV_DPFLIPSTAT _MMIO(VLV_DISPLAY_BASE + 0x70028)
#define PIPEB_LINE_COMPARE_INT_EN REG_BIT(29)
#define PIPEB_HLINE_INT_EN REG_BIT(28)
@@ -799,56 +659,9 @@
#define PCH_3DCGDIS1 _MMIO(0x46024)
# define VFMUNIT_CLOCK_GATE_DISABLE (1 << 11)
-/* Display Internal Timeout Register */
-#define RM_TIMEOUT _MMIO(0x42060)
-#define RM_TIMEOUT_REG_CAPTURE _MMIO(0x420E0)
-#define MMIO_TIMEOUT_US(us) ((us) << 0)
-
-/* interrupts */
-#define DE_MASTER_IRQ_CONTROL (1 << 31)
-#define DE_SPRITEB_FLIP_DONE (1 << 29)
-#define DE_SPRITEA_FLIP_DONE (1 << 28)
-#define DE_PLANEB_FLIP_DONE (1 << 27)
-#define DE_PLANEA_FLIP_DONE (1 << 26)
-#define DE_PLANE_FLIP_DONE(plane) (1 << (26 + (plane)))
-#define DE_PCU_EVENT (1 << 25)
-#define DE_GTT_FAULT (1 << 24)
-#define DE_POISON (1 << 23)
-#define DE_PERFORM_COUNTER (1 << 22)
-#define DE_PCH_EVENT (1 << 21)
-#define DE_AUX_CHANNEL_A (1 << 20)
-#define DE_DP_A_HOTPLUG (1 << 19)
-#define DE_GSE (1 << 18)
-#define DE_PIPEB_VBLANK (1 << 15)
-#define DE_PIPEB_EVEN_FIELD (1 << 14)
-#define DE_PIPEB_ODD_FIELD (1 << 13)
-#define DE_PIPEB_LINE_COMPARE (1 << 12)
-#define DE_PIPEB_VSYNC (1 << 11)
-#define DE_PIPEB_CRC_DONE (1 << 10)
-#define DE_PIPEB_FIFO_UNDERRUN (1 << 8)
-#define DE_PIPEA_VBLANK (1 << 7)
-#define DE_PIPE_VBLANK(pipe) (1 << (7 + 8 * (pipe)))
-#define DE_PIPEA_EVEN_FIELD (1 << 6)
-#define DE_PIPEA_ODD_FIELD (1 << 5)
-#define DE_PIPEA_LINE_COMPARE (1 << 4)
-#define DE_PIPEA_VSYNC (1 << 3)
-#define DE_PIPEA_CRC_DONE (1 << 2)
-#define DE_PIPE_CRC_DONE(pipe) (1 << (2 + 8 * (pipe)))
-#define DE_PIPEA_FIFO_UNDERRUN (1 << 0)
-#define DE_PIPE_FIFO_UNDERRUN(pipe) (1 << (8 * (pipe)))
-
#define VLV_MASTER_IER _MMIO(0x4400c) /* Gunit master IER */
#define MASTER_INTERRUPT_ENABLE (1 << 31)
-#define DEISR _MMIO(0x44000)
-#define DEIMR _MMIO(0x44004)
-#define DEIIR _MMIO(0x44008)
-#define DEIER _MMIO(0x4400c)
-
-#define DE_IRQ_REGS I915_IRQ_REGS(DEIMR, \
- DEIER, \
- DEIIR)
-
#define GTISR _MMIO(0x44010)
#define GTIMR _MMIO(0x44014)
#define GTIIR _MMIO(0x44018)
@@ -858,24 +671,6 @@
GTIER, \
GTIIR)
-#define GEN8_MASTER_IRQ _MMIO(0x44200)
-#define GEN8_MASTER_IRQ_CONTROL (1 << 31)
-#define GEN8_PCU_IRQ (1 << 30)
-#define GEN8_DE_PCH_IRQ (1 << 23)
-#define GEN8_DE_MISC_IRQ (1 << 22)
-#define GEN8_DE_PORT_IRQ (1 << 20)
-#define GEN8_DE_PIPE_C_IRQ (1 << 18)
-#define GEN8_DE_PIPE_B_IRQ (1 << 17)
-#define GEN8_DE_PIPE_A_IRQ (1 << 16)
-#define GEN8_DE_PIPE_IRQ(pipe) (1 << (16 + (pipe)))
-#define GEN8_GT_VECS_IRQ (1 << 6)
-#define GEN8_GT_GUC_IRQ (1 << 5)
-#define GEN8_GT_PM_IRQ (1 << 4)
-#define GEN8_GT_VCS1_IRQ (1 << 3) /* NB: VCS2 in bspec! */
-#define GEN8_GT_VCS0_IRQ (1 << 2) /* NB: VCS1 in bpsec! */
-#define GEN8_GT_BCS_IRQ (1 << 1)
-#define GEN8_GT_RCS_IRQ (1 << 0)
-
#define GEN8_GT_ISR(which) _MMIO(0x44300 + (0x10 * (which)))
#define GEN8_GT_IMR(which) _MMIO(0x44304 + (0x10 * (which)))
#define GEN8_GT_IIR(which) _MMIO(0x44308 + (0x10 * (which)))
@@ -901,25 +696,6 @@
GEN8_PCU_IER, \
GEN8_PCU_IIR)
-#define GEN11_GU_MISC_ISR _MMIO(0x444f0)
-#define GEN11_GU_MISC_IMR _MMIO(0x444f4)
-#define GEN11_GU_MISC_IIR _MMIO(0x444f8)
-#define GEN11_GU_MISC_IER _MMIO(0x444fc)
-#define GEN11_GU_MISC_GSE (1 << 27)
-
-#define GEN11_GU_MISC_IRQ_REGS I915_IRQ_REGS(GEN11_GU_MISC_IMR, \
- GEN11_GU_MISC_IER, \
- GEN11_GU_MISC_IIR)
-
-#define GEN11_GFX_MSTR_IRQ _MMIO(0x190010)
-#define GEN11_MASTER_IRQ (1 << 31)
-#define GEN11_PCU_IRQ (1 << 30)
-#define GEN11_GU_MISC_IRQ (1 << 29)
-#define GEN11_DISPLAY_IRQ (1 << 16)
-#define GEN11_GT_DW_IRQ(x) (1 << (x))
-#define GEN11_GT_DW1_IRQ (1 << 1)
-#define GEN11_GT_DW0_IRQ (1 << 0)
-
#define DG1_MSTR_TILE_INTR _MMIO(0x190008)
#define DG1_MSTR_IRQ REG_BIT(31)
#define DG1_MSTR_TILE(t) REG_BIT(t)
@@ -941,133 +717,9 @@
#define CHICKEN3_DGMG_REQ_OUT_FIX_DISABLE REG_BIT(5)
#define CHICKEN3_DGMG_DONE_FIX_DISABLE REG_BIT(2)
-#define CHICKEN_PAR1_1 _MMIO(0x42080)
-#define IGNORE_KVMR_PIPE_A REG_BIT(23)
-#define KBL_ARB_FILL_SPARE_22 REG_BIT(22)
-#define DIS_RAM_BYPASS_PSR2_MAN_TRACK REG_BIT(16)
-#define SKL_DE_COMPRESSED_HASH_MODE REG_BIT(15)
-#define HSW_MASK_VBL_TO_PIPE_IN_SRD REG_BIT(15) /* hsw/bdw */
-#define FORCE_ARB_IDLE_PLANES REG_BIT(14)
-#define SKL_EDP_PSR_FIX_RDWRAP REG_BIT(3)
-#define IGNORE_PSR2_HW_TRACKING REG_BIT(1)
-
#define CHICKEN_PAR2_1 _MMIO(0x42090)
#define KVM_CONFIG_CHANGE_NOTIFICATION_SELECT REG_BIT(14)
-#define _CHICKEN_PIPESL_1_A 0x420b0
-#define _CHICKEN_PIPESL_1_B 0x420b4
-#define CHICKEN_PIPESL_1(pipe) _MMIO_PIPE(pipe, _CHICKEN_PIPESL_1_A, _CHICKEN_PIPESL_1_B)
-#define HSW_PRI_STRETCH_MAX_MASK REG_GENMASK(28, 27)
-#define HSW_PRI_STRETCH_MAX_X8 REG_FIELD_PREP(HSW_PRI_STRETCH_MAX_MASK, 0)
-#define HSW_PRI_STRETCH_MAX_X4 REG_FIELD_PREP(HSW_PRI_STRETCH_MAX_MASK, 1)
-#define HSW_PRI_STRETCH_MAX_X2 REG_FIELD_PREP(HSW_PRI_STRETCH_MAX_MASK, 2)
-#define HSW_PRI_STRETCH_MAX_X1 REG_FIELD_PREP(HSW_PRI_STRETCH_MAX_MASK, 3)
-#define HSW_SPR_STRETCH_MAX_MASK REG_GENMASK(26, 25)
-#define HSW_SPR_STRETCH_MAX_X8 REG_FIELD_PREP(HSW_SPR_STRETCH_MAX_MASK, 0)
-#define HSW_SPR_STRETCH_MAX_X4 REG_FIELD_PREP(HSW_SPR_STRETCH_MAX_MASK, 1)
-#define HSW_SPR_STRETCH_MAX_X2 REG_FIELD_PREP(HSW_SPR_STRETCH_MAX_MASK, 2)
-#define HSW_SPR_STRETCH_MAX_X1 REG_FIELD_PREP(HSW_SPR_STRETCH_MAX_MASK, 3)
-#define HSW_FBCQ_DIS REG_BIT(22)
-#define HSW_UNMASK_VBL_TO_REGS_IN_SRD REG_BIT(15) /* hsw */
-#define SKL_PSR_MASK_PLANE_FLIP REG_BIT(11) /* skl+ */
-#define SKL_PLANE1_STRETCH_MAX_MASK REG_GENMASK(1, 0)
-#define SKL_PLANE1_STRETCH_MAX_X8 REG_FIELD_PREP(SKL_PLANE1_STRETCH_MAX_MASK, 0)
-#define SKL_PLANE1_STRETCH_MAX_X4 REG_FIELD_PREP(SKL_PLANE1_STRETCH_MAX_MASK, 1)
-#define SKL_PLANE1_STRETCH_MAX_X2 REG_FIELD_PREP(SKL_PLANE1_STRETCH_MAX_MASK, 2)
-#define SKL_PLANE1_STRETCH_MAX_X1 REG_FIELD_PREP(SKL_PLANE1_STRETCH_MAX_MASK, 3)
-#define BDW_UNMASK_VBL_TO_REGS_IN_SRD REG_BIT(0) /* bdw */
-
-#define DISP_ARB_CTL _MMIO(0x45000)
-#define DISP_FBC_MEMORY_WAKE REG_BIT(31)
-#define DISP_TILE_SURFACE_SWIZZLING REG_BIT(13)
-#define DISP_FBC_WM_DIS REG_BIT(15)
-
-#define GEN8_CHICKEN_DCPR_1 _MMIO(0x46430)
-#define _LATENCY_REPORTING_REMOVED_PIPE_D REG_BIT(31)
-#define SKL_SELECT_ALTERNATE_DC_EXIT REG_BIT(30)
-#define _LATENCY_REPORTING_REMOVED_PIPE_C REG_BIT(25)
-#define _LATENCY_REPORTING_REMOVED_PIPE_B REG_BIT(24)
-#define _LATENCY_REPORTING_REMOVED_PIPE_A REG_BIT(23)
-#define LATENCY_REPORTING_REMOVED(pipe) _PICK((pipe), \
- _LATENCY_REPORTING_REMOVED_PIPE_A, \
- _LATENCY_REPORTING_REMOVED_PIPE_B, \
- _LATENCY_REPORTING_REMOVED_PIPE_C, \
- _LATENCY_REPORTING_REMOVED_PIPE_D)
-#define ICL_DELAY_PMRSP REG_BIT(22)
-#define DISABLE_FLR_SRC REG_BIT(15)
-#define MASK_WAKEMEM REG_BIT(13)
-#define DDI_CLOCK_REG_ACCESS REG_BIT(7)
-
-#define GMD_ID_DISPLAY _MMIO(0x510a0)
-#define GMD_ID_ARCH_MASK REG_GENMASK(31, 22)
-#define GMD_ID_RELEASE_MASK REG_GENMASK(21, 14)
-#define GMD_ID_STEP REG_GENMASK(5, 0)
-
-/* PCH */
-
-#define SDEISR _MMIO(0xc4000)
-#define SDEIMR _MMIO(0xc4004)
-#define SDEIIR _MMIO(0xc4008)
-#define SDEIER _MMIO(0xc400c)
-
-/* Icelake PPS_DATA and _ECC DIP Registers.
- * These are available for transcoders B,C and eDP.
- * Adding the _A so as to reuse the _MMIO_TRANS2
- * definition, with which it offsets to the right location.
- */
-
-#define _TRANSA_CHICKEN1 0xf0060
-#define _TRANSB_CHICKEN1 0xf1060
-#define TRANS_CHICKEN1(pipe) _MMIO_PIPE(pipe, _TRANSA_CHICKEN1, _TRANSB_CHICKEN1)
-#define TRANS_CHICKEN1_HDMIUNIT_GC_DISABLE REG_BIT(10)
-#define TRANS_CHICKEN1_DP0UNIT_GC_DISABLE REG_BIT(4)
-
-#define _TRANSA_CHICKEN2 0xf0064
-#define _TRANSB_CHICKEN2 0xf1064
-#define TRANS_CHICKEN2(pipe) _MMIO_PIPE(pipe, _TRANSA_CHICKEN2, _TRANSB_CHICKEN2)
-#define TRANS_CHICKEN2_TIMING_OVERRIDE REG_BIT(31)
-#define TRANS_CHICKEN2_FDI_POLARITY_REVERSED REG_BIT(29)
-#define TRANS_CHICKEN2_FRAME_START_DELAY_MASK REG_GENMASK(28, 27)
-#define TRANS_CHICKEN2_FRAME_START_DELAY(x) REG_FIELD_PREP(TRANS_CHICKEN2_FRAME_START_DELAY_MASK, (x)) /* 0-3 */
-#define TRANS_CHICKEN2_DISABLE_DEEP_COLOR_COUNTER REG_BIT(26)
-#define TRANS_CHICKEN2_DISABLE_DEEP_COLOR_MODESWITCH REG_BIT(25)
-
-#define SOUTH_CHICKEN1 _MMIO(0xc2000)
-#define FDIA_PHASE_SYNC_SHIFT_OVR 19
-#define FDIA_PHASE_SYNC_SHIFT_EN 18
-#define INVERT_DDIE_HPD REG_BIT(28)
-#define INVERT_DDID_HPD_MTP REG_BIT(27)
-#define INVERT_TC4_HPD REG_BIT(26)
-#define INVERT_TC3_HPD REG_BIT(25)
-#define INVERT_TC2_HPD REG_BIT(24)
-#define INVERT_TC1_HPD REG_BIT(23)
-#define INVERT_DDID_HPD (1 << 18)
-#define INVERT_DDIC_HPD (1 << 17)
-#define INVERT_DDIB_HPD (1 << 16)
-#define INVERT_DDIA_HPD (1 << 15)
-#define FDI_PHASE_SYNC_OVR(pipe) (1 << (FDIA_PHASE_SYNC_SHIFT_OVR - ((pipe) * 2)))
-#define FDI_PHASE_SYNC_EN(pipe) (1 << (FDIA_PHASE_SYNC_SHIFT_EN - ((pipe) * 2)))
-#define FDI_BC_BIFURCATION_SELECT (1 << 12)
-#define CHASSIS_CLK_REQ_DURATION_MASK (0xf << 8)
-#define CHASSIS_CLK_REQ_DURATION(x) ((x) << 8)
-#define SBCLK_RUN_REFCLK_DIS (1 << 7)
-#define ICP_SECOND_PPS_IO_SELECT REG_BIT(2)
-#define SPT_PWM_GRANULARITY (1 << 0)
-#define SOUTH_CHICKEN2 _MMIO(0xc2004)
-#define FDI_MPHY_IOSFSB_RESET_STATUS (1 << 13)
-#define FDI_MPHY_IOSFSB_RESET_CTL (1 << 12)
-#define LPT_PWM_GRANULARITY (1 << 5)
-#define DPLS_EDP_PPS_FIX_DIS (1 << 0)
-
-#define SOUTH_DSPCLK_GATE_D _MMIO(0xc2020)
-#define PCH_GMBUSUNIT_CLOCK_GATE_DISABLE (1 << 31)
-#define PCH_DPLUNIT_CLOCK_GATE_DISABLE (1 << 30)
-#define PCH_DPLSUNIT_CLOCK_GATE_DISABLE (1 << 29)
-#define PCH_DPMGUNIT_CLOCK_GATE_DISABLE (1 << 15)
-#define PCH_CPUNIT_CLOCK_GATE_DISABLE (1 << 14)
-#define CNP_PWM_CGE_GATING_DISABLE (1 << 13)
-#define PCH_LP_PARTITION_LEVEL_DISABLE (1 << 12)
-
#define VLV_PMWGICZ _MMIO(0x1300a4)
#define HSW_EDRAM_CAP _MMIO(0x120010)
@@ -1076,106 +728,6 @@
#define EDRAM_WAYS_IDX(cap) (((cap) >> 5) & 0x7)
#define EDRAM_SETS_IDX(cap) (((cap) >> 8) & 0x3)
-#define GEN6_PCODE_MAILBOX _MMIO(0x138124)
-#define GEN6_PCODE_READY (1 << 31)
-#define GEN6_PCODE_MB_PARAM2 REG_GENMASK(23, 16)
-#define GEN6_PCODE_MB_PARAM1 REG_GENMASK(15, 8)
-#define GEN6_PCODE_MB_COMMAND REG_GENMASK(7, 0)
-#define GEN6_PCODE_ERROR_MASK 0xFF
-#define GEN6_PCODE_SUCCESS 0x0
-#define GEN6_PCODE_ILLEGAL_CMD 0x1
-#define GEN6_PCODE_MIN_FREQ_TABLE_GT_RATIO_OUT_OF_RANGE 0x2
-#define GEN6_PCODE_TIMEOUT 0x3
-#define GEN6_PCODE_UNIMPLEMENTED_CMD 0xFF
-#define GEN7_PCODE_TIMEOUT 0x2
-#define GEN7_PCODE_ILLEGAL_DATA 0x3
-#define GEN11_PCODE_ILLEGAL_SUBCOMMAND 0x4
-#define GEN11_PCODE_LOCKED 0x6
-#define GEN11_PCODE_REJECTED 0x11
-#define GEN7_PCODE_MIN_FREQ_TABLE_GT_RATIO_OUT_OF_RANGE 0x10
-#define GEN6_PCODE_WRITE_RC6VIDS 0x4
-#define GEN6_PCODE_READ_RC6VIDS 0x5
-#define GEN6_ENCODE_RC6_VID(mv) (((mv) - 245) / 5)
-#define GEN6_DECODE_RC6_VID(vids) (((vids) * 5) + 245)
-#define BDW_PCODE_DISPLAY_FREQ_CHANGE_REQ 0x18
-#define GEN9_PCODE_READ_MEM_LATENCY 0x6
-#define GEN9_MEM_LATENCY_LEVEL_3_7_MASK REG_GENMASK(31, 24)
-#define GEN9_MEM_LATENCY_LEVEL_2_6_MASK REG_GENMASK(23, 16)
-#define GEN9_MEM_LATENCY_LEVEL_1_5_MASK REG_GENMASK(15, 8)
-#define GEN9_MEM_LATENCY_LEVEL_0_4_MASK REG_GENMASK(7, 0)
-#define SKL_PCODE_LOAD_HDCP_KEYS 0x5
-#define SKL_PCODE_CDCLK_CONTROL 0x7
-#define SKL_CDCLK_PREPARE_FOR_CHANGE 0x3
-#define SKL_CDCLK_READY_FOR_CHANGE 0x1
-#define GEN6_PCODE_WRITE_MIN_FREQ_TABLE 0x8
-#define GEN6_PCODE_READ_MIN_FREQ_TABLE 0x9
-#define GEN6_READ_OC_PARAMS 0xc
-#define ICL_PCODE_MEM_SUBSYSYSTEM_INFO 0xd
-#define ICL_PCODE_MEM_SS_READ_GLOBAL_INFO (0x0 << 8)
-#define ICL_PCODE_MEM_SS_READ_QGV_POINT_INFO(point) (((point) << 16) | (0x1 << 8))
-#define ADL_PCODE_MEM_SS_READ_PSF_GV_INFO ((0) | (0x2 << 8))
-#define DISPLAY_TO_PCODE_CDCLK_MAX 0x28D
-#define DISPLAY_TO_PCODE_VOLTAGE_MASK REG_GENMASK(1, 0)
-#define DISPLAY_TO_PCODE_VOLTAGE_MAX DISPLAY_TO_PCODE_VOLTAGE_MASK
-#define DISPLAY_TO_PCODE_CDCLK_VALID REG_BIT(27)
-#define DISPLAY_TO_PCODE_PIPE_COUNT_VALID REG_BIT(31)
-#define DISPLAY_TO_PCODE_CDCLK_MASK REG_GENMASK(25, 16)
-#define DISPLAY_TO_PCODE_PIPE_COUNT_MASK REG_GENMASK(30, 28)
-#define DISPLAY_TO_PCODE_CDCLK(x) REG_FIELD_PREP(DISPLAY_TO_PCODE_CDCLK_MASK, (x))
-#define DISPLAY_TO_PCODE_PIPE_COUNT(x) REG_FIELD_PREP(DISPLAY_TO_PCODE_PIPE_COUNT_MASK, (x))
-#define DISPLAY_TO_PCODE_VOLTAGE(x) REG_FIELD_PREP(DISPLAY_TO_PCODE_VOLTAGE_MASK, (x))
-#define DISPLAY_TO_PCODE_UPDATE_MASK(cdclk, num_pipes, voltage_level) \
- ((DISPLAY_TO_PCODE_CDCLK(cdclk)) | \
- (DISPLAY_TO_PCODE_PIPE_COUNT(num_pipes)) | \
- (DISPLAY_TO_PCODE_VOLTAGE(voltage_level)))
-#define ICL_PCODE_SAGV_DE_MEM_SS_CONFIG 0xe
-#define ICL_PCODE_REP_QGV_MASK REG_GENMASK(1, 0)
-#define ICL_PCODE_REP_QGV_SAFE REG_FIELD_PREP(ICL_PCODE_REP_QGV_MASK, 0)
-#define ICL_PCODE_REP_QGV_POLL REG_FIELD_PREP(ICL_PCODE_REP_QGV_MASK, 1)
-#define ICL_PCODE_REP_QGV_REJECTED REG_FIELD_PREP(ICL_PCODE_REP_QGV_MASK, 2)
-#define ADLS_PCODE_REP_PSF_MASK REG_GENMASK(3, 2)
-#define ADLS_PCODE_REP_PSF_SAFE REG_FIELD_PREP(ADLS_PCODE_REP_PSF_MASK, 0)
-#define ADLS_PCODE_REP_PSF_POLL REG_FIELD_PREP(ADLS_PCODE_REP_PSF_MASK, 1)
-#define ADLS_PCODE_REP_PSF_REJECTED REG_FIELD_PREP(ADLS_PCODE_REP_PSF_MASK, 2)
-#define ICL_PCODE_REQ_QGV_PT_MASK REG_GENMASK(7, 0)
-#define ICL_PCODE_REQ_QGV_PT(x) REG_FIELD_PREP(ICL_PCODE_REQ_QGV_PT_MASK, (x))
-#define ADLS_PCODE_REQ_PSF_PT_MASK REG_GENMASK(10, 8)
-#define ADLS_PCODE_REQ_PSF_PT(x) REG_FIELD_PREP(ADLS_PCODE_REQ_PSF_PT_MASK, (x))
-#define GEN6_PCODE_READ_D_COMP 0x10
-#define GEN6_PCODE_WRITE_D_COMP 0x11
-#define ICL_PCODE_EXIT_TCCOLD 0x12
-#define HSW_PCODE_DE_WRITE_FREQ_REQ 0x17
-#define DISPLAY_IPS_CONTROL 0x19
-#define TGL_PCODE_TCCOLD 0x26
-#define TGL_PCODE_EXIT_TCCOLD_DATA_L_EXIT_FAILED REG_BIT(0)
-#define TGL_PCODE_EXIT_TCCOLD_DATA_L_BLOCK_REQ 0
-#define TGL_PCODE_EXIT_TCCOLD_DATA_L_UNBLOCK_REQ REG_BIT(0)
- /* See also IPS_CTL */
-#define IPS_PCODE_CONTROL (1 << 30)
-#define HSW_PCODE_DYNAMIC_DUTY_CYCLE_CONTROL 0x1A
-#define GEN9_PCODE_SAGV_CONTROL 0x21
-#define GEN9_SAGV_DISABLE 0x0
-#define GEN9_SAGV_IS_DISABLED 0x1
-#define GEN9_SAGV_ENABLE 0x3
-#define DG1_PCODE_STATUS 0x7E
-#define DG1_UNCORE_GET_INIT_STATUS 0x0
-#define DG1_UNCORE_INIT_STATUS_COMPLETE 0x1
-#define PCODE_POWER_SETUP 0x7C
-#define POWER_SETUP_SUBCOMMAND_READ_I1 0x4
-#define POWER_SETUP_SUBCOMMAND_WRITE_I1 0x5
-#define POWER_SETUP_I1_WATTS REG_BIT(31)
-#define POWER_SETUP_I1_SHIFT 6 /* 10.6 fixed point format */
-#define POWER_SETUP_I1_DATA_MASK REG_GENMASK(15, 0)
-#define POWER_SETUP_SUBCOMMAND_G8_ENABLE 0x6
-#define GEN12_PCODE_READ_SAGV_BLOCK_TIME_US 0x23
-#define XEHP_PCODE_FREQUENCY_CONFIG 0x6e /* pvc */
-/* XEHP_PCODE_FREQUENCY_CONFIG sub-commands (param1) */
-#define PCODE_MBOX_FC_SC_READ_FUSED_P0 0x0
-#define PCODE_MBOX_FC_SC_READ_FUSED_PN 0x1
-/* PCODE_MBOX_DOMAIN_* - mailbox domain IDs */
-/* XEHP_PCODE_FREQUENCY_CONFIG param2 */
-#define PCODE_MBOX_DOMAIN_NONE 0x0
-#define PCODE_MBOX_DOMAIN_MEDIAFF 0x3
#define GEN6_PCODE_DATA _MMIO(0x138128)
#define GEN6_PCODE_FREQ_IA_RATIO_SHIFT 8
#define GEN6_PCODE_FREQ_RING_RATIO_SHIFT 16
@@ -1224,20 +776,9 @@
#define SGGI_DIS REG_BIT(15)
#define SGR_DIS REG_BIT(13)
-#define PRIMARY_SPI_TRIGGER _MMIO(0x102040)
-#define PRIMARY_SPI_ADDRESS _MMIO(0x102080)
-#define PRIMARY_SPI_REGIONID _MMIO(0x102084)
-#define SPI_STATIC_REGIONS _MMIO(0x102090)
-#define OPTIONROM_SPI_REGIONID_MASK REG_GENMASK(7, 0)
-#define OROM_OFFSET _MMIO(0x1020c0)
-#define OROM_OFFSET_MASK REG_GENMASK(20, 16)
-
-#define MTL_MEM_SS_INFO_GLOBAL _MMIO(0x45700)
-#define XE3P_ECC_IMPACTING_DE REG_BIT(12)
-#define MTL_N_OF_ENABLED_QGV_POINTS_MASK REG_GENMASK(11, 8)
-#define MTL_N_OF_POPULATED_CH_MASK REG_GENMASK(7, 4)
-#define MTL_DDR_TYPE_MASK REG_GENMASK(3, 0)
-
#define MTL_MEDIA_GSI_BASE 0x380000
+#define DSPCLK_GATE_D _MMIO(0x6200)
+# define OVRUNIT_CLOCK_GATE_DISABLE (1 << 3)
+
#endif /* _I915_REG_H_ */
diff --git a/drivers/gpu/drm/i915/i915_reg_defs.h b/drivers/gpu/drm/i915/i915_reg_defs.h
index e81fac8ab51b..e897d3ccbf9e 100644
--- a/drivers/gpu/drm/i915/i915_reg_defs.h
+++ b/drivers/gpu/drm/i915/i915_reg_defs.h
@@ -6,183 +6,8 @@
#ifndef __I915_REG_DEFS__
#define __I915_REG_DEFS__
-#include <linux/bitfield.h>
-#include <linux/bits.h>
-
-/*
- * Wrappers over the generic fixed width BIT_U*() and GENMASK_U*()
- * implementations, for compatibility reasons with previous implementation.
- */
-#define REG_GENMASK(high, low) GENMASK_U32(high, low)
-#define REG_GENMASK64(high, low) GENMASK_U64(high, low)
-#define REG_GENMASK16(high, low) GENMASK_U16(high, low)
-#define REG_GENMASK8(high, low) GENMASK_U8(high, low)
-
-#define REG_BIT(n) BIT_U32(n)
-#define REG_BIT64(n) BIT_U64(n)
-#define REG_BIT16(n) BIT_U16(n)
-#define REG_BIT8(n) BIT_U8(n)
-
-/*
- * Local integer constant expression version of is_power_of_2().
- */
-#define IS_POWER_OF_2(__x) ((__x) && (((__x) & ((__x) - 1)) == 0))
-
-/**
- * REG_FIELD_PREP() - Prepare a u32 bitfield value
- * @__mask: shifted mask defining the field's length and position
- * @__val: value to put in the field
- *
- * Local copy of FIELD_PREP() to generate an integer constant expression, force
- * u32 and for consistency with REG_FIELD_GET(), REG_BIT() and REG_GENMASK().
- *
- * @return: @__val masked and shifted into the field defined by @__mask.
- */
-#define REG_FIELD_PREP(__mask, __val) \
- ((u32)((((typeof(__mask))(__val) << __bf_shf(__mask)) & (__mask)) + \
- BUILD_BUG_ON_ZERO(!__is_constexpr(__mask)) + \
- BUILD_BUG_ON_ZERO((__mask) == 0 || (__mask) > U32_MAX) + \
- BUILD_BUG_ON_ZERO(!IS_POWER_OF_2((__mask) + (1ULL << __bf_shf(__mask)))) + \
- BUILD_BUG_ON_ZERO(__builtin_choose_expr(__is_constexpr(__val), (~((__mask) >> __bf_shf(__mask)) & (__val)), 0))))
-
-/**
- * REG_FIELD_PREP8() - Prepare a u8 bitfield value
- * @__mask: shifted mask defining the field's length and position
- * @__val: value to put in the field
- *
- * Local copy of FIELD_PREP() to generate an integer constant expression, force
- * u8 and for consistency with REG_FIELD_GET8(), REG_BIT8() and REG_GENMASK8().
- *
- * @return: @__val masked and shifted into the field defined by @__mask.
- */
-#define REG_FIELD_PREP8(__mask, __val) \
- ((u8)((((typeof(__mask))(__val) << __bf_shf(__mask)) & (__mask)) + \
- BUILD_BUG_ON_ZERO(!__is_constexpr(__mask)) + \
- BUILD_BUG_ON_ZERO((__mask) == 0 || (__mask) > U8_MAX) + \
- BUILD_BUG_ON_ZERO(!IS_POWER_OF_2((__mask) + (1ULL << __bf_shf(__mask)))) + \
- BUILD_BUG_ON_ZERO(__builtin_choose_expr(__is_constexpr(__val), (~((__mask) >> __bf_shf(__mask)) & (__val)), 0))))
-
-/**
- * REG_FIELD_GET() - Extract a u32 bitfield value
- * @__mask: shifted mask defining the field's length and position
- * @__val: value to extract the bitfield value from
- *
- * Local wrapper for FIELD_GET() to force u32 and for consistency with
- * REG_FIELD_PREP(), REG_BIT() and REG_GENMASK().
- *
- * @return: Masked and shifted value of the field defined by @__mask in @__val.
- */
-#define REG_FIELD_GET(__mask, __val) ((u32)FIELD_GET(__mask, __val))
-
-/**
- * REG_FIELD_GET64() - Extract a u64 bitfield value
- * @__mask: shifted mask defining the field's length and position
- * @__val: value to extract the bitfield value from
- *
- * Local wrapper for FIELD_GET() to force u64 and for consistency with
- * REG_GENMASK64().
- *
- * @return: Masked and shifted value of the field defined by @__mask in @__val.
- */
-#define REG_FIELD_GET64(__mask, __val) ((u64)FIELD_GET(__mask, __val))
-
-
-/**
- * REG_FIELD_PREP16() - Prepare a u16 bitfield value
- * @__mask: shifted mask defining the field's length and position
- * @__val: value to put in the field
- *
- * Local copy of FIELD_PREP16() to generate an integer constant
- * expression, force u8 and for consistency with
- * REG_FIELD_GET16(), REG_BIT16() and REG_GENMASK16().
- *
- * @return: @__val masked and shifted into the field defined by @__mask.
- */
-#define REG_FIELD_PREP16(__mask, __val) \
- ((u16)((((typeof(__mask))(__val) << __bf_shf(__mask)) & (__mask)) + \
- BUILD_BUG_ON_ZERO(!__is_constexpr(__mask)) + \
- BUILD_BUG_ON_ZERO((__mask) == 0 || (__mask) > U16_MAX) + \
- BUILD_BUG_ON_ZERO(!IS_POWER_OF_2((__mask) + (1ULL << __bf_shf(__mask)))) + \
- BUILD_BUG_ON_ZERO(__builtin_choose_expr(__is_constexpr(__val), (~((__mask) >> __bf_shf(__mask)) & (__val)), 0))))
-
-#define __MASKED_FIELD(mask, value) ((mask) << 16 | (value))
-#define _MASKED_FIELD(mask, value) ({ \
- if (__builtin_constant_p(mask)) \
- BUILD_BUG_ON_MSG(((mask) & 0xffff0000), "Incorrect mask"); \
- if (__builtin_constant_p(value)) \
- BUILD_BUG_ON_MSG((value) & 0xffff0000, "Incorrect value"); \
- if (__builtin_constant_p(mask) && __builtin_constant_p(value)) \
- BUILD_BUG_ON_MSG((value) & ~(mask), \
- "Incorrect value for mask"); \
- __MASKED_FIELD(mask, value); })
-#define _MASKED_BIT_ENABLE(a) ({ typeof(a) _a = (a); _MASKED_FIELD(_a, _a); })
-#define _MASKED_BIT_DISABLE(a) (_MASKED_FIELD((a), 0))
-
-/*
- * Given the first two numbers __a and __b of arbitrarily many evenly spaced
- * numbers, pick the 0-based __index'th value.
- *
- * Always prefer this over _PICK() if the numbers are evenly spaced.
- */
-#define _PICK_EVEN(__index, __a, __b) ((__a) + (__index) * ((__b) - (__a)))
-
-/*
- * Like _PICK_EVEN(), but supports 2 ranges of evenly spaced address offsets.
- * @__c_index corresponds to the index in which the second range starts to be
- * used. Using math interval notation, the first range is used for indexes [ 0,
- * @__c_index), while the second range is used for [ @__c_index, ... ). Example:
- *
- * #define _FOO_A 0xf000
- * #define _FOO_B 0xf004
- * #define _FOO_C 0xf008
- * #define _SUPER_FOO_A 0xa000
- * #define _SUPER_FOO_B 0xa100
- * #define FOO(x) _MMIO(_PICK_EVEN_2RANGES(x, 3, \
- * _FOO_A, _FOO_B, \
- * _SUPER_FOO_A, _SUPER_FOO_B))
- *
- * This expands to:
- * 0: 0xf000,
- * 1: 0xf004,
- * 2: 0xf008,
- * 3: 0xa000,
- * 4: 0xa100,
- * 5: 0xa200,
- * ...
- */
-#define _PICK_EVEN_2RANGES(__index, __c_index, __a, __b, __c, __d) \
- (BUILD_BUG_ON_ZERO(!__is_constexpr(__c_index)) + \
- ((__index) < (__c_index) ? _PICK_EVEN(__index, __a, __b) : \
- _PICK_EVEN((__index) - (__c_index), __c, __d)))
-
-/*
- * Given the arbitrary numbers in varargs, pick the 0-based __index'th number.
- *
- * Always prefer _PICK_EVEN() over this if the numbers are evenly spaced.
- */
-#define _PICK(__index, ...) (((const u32 []){ __VA_ARGS__ })[__index])
-
-/**
- * REG_FIELD_GET8() - Extract a u8 bitfield value
- * @__mask: shifted mask defining the field's length and position
- * @__val: value to extract the bitfield value from
- *
- * Local wrapper for FIELD_GET() to force u8 and for consistency with
- * REG_FIELD_PREP(), REG_BIT() and REG_GENMASK().
- *
- * @return: Masked and shifted value of the field defined by @__mask in @__val.
- */
-#define REG_FIELD_GET8(__mask, __val) ((u8)FIELD_GET(__mask, __val))
-
-/**
- * REG_FIELD_MAX() - produce the maximum value representable by a field
- * @__mask: shifted mask defining the field's length and position
- *
- * Local wrapper for FIELD_MAX() to return the maximum bit value that can
- * be held in the field specified by @_mask, cast to u32 for consistency
- * with other macros.
- */
-#define REG_FIELD_MAX(__mask) ((u32)FIELD_MAX(__mask))
+#include <drm/intel/pick.h>
+#include <drm/intel/reg_bits.h>
typedef struct {
u32 reg;
diff --git a/drivers/gpu/drm/i915/i915_sw_fence_work.c b/drivers/gpu/drm/i915/i915_sw_fence_work.c
index d2e56b387993..366418108f78 100644
--- a/drivers/gpu/drm/i915/i915_sw_fence_work.c
+++ b/drivers/gpu/drm/i915/i915_sw_fence_work.c
@@ -38,7 +38,7 @@ fence_notify(struct i915_sw_fence *fence, enum i915_sw_fence_notify state)
if (test_bit(DMA_FENCE_WORK_IMM, &f->dma.flags))
fence_work(&f->work);
else
- queue_work(system_unbound_wq, &f->work);
+ queue_work(system_dfl_wq, &f->work);
} else {
fence_complete(f);
}
diff --git a/drivers/gpu/drm/i915/i915_vma.c b/drivers/gpu/drm/i915/i915_vma.c
index afc192d9931b..6a3a4d4244dc 100644
--- a/drivers/gpu/drm/i915/i915_vma.c
+++ b/drivers/gpu/drm/i915/i915_vma.c
@@ -27,6 +27,7 @@
#include <drm/drm_gem.h>
#include <drm/drm_print.h>
+#include <drm/intel/display_parent_interface.h>
#include "display/intel_fb.h"
#include "display/intel_frontbuffer.h"
@@ -2332,3 +2333,12 @@ int __init i915_vma_module_init(void)
return 0;
}
+
+static int i915_vma_fence_id(const struct i915_vma *vma)
+{
+ return vma->fence ? vma->fence->id : -1;
+}
+
+const struct intel_display_vma_interface i915_display_vma_interface = {
+ .fence_id = i915_vma_fence_id,
+};
diff --git a/drivers/gpu/drm/i915/i915_vma.h b/drivers/gpu/drm/i915/i915_vma.h
index 8054047840aa..fa2d9b429db6 100644
--- a/drivers/gpu/drm/i915/i915_vma.h
+++ b/drivers/gpu/drm/i915/i915_vma.h
@@ -404,11 +404,6 @@ i915_vma_unpin_fence(struct i915_vma *vma)
__i915_vma_unpin_fence(vma);
}
-static inline int i915_vma_fence_id(const struct i915_vma *vma)
-{
- return vma->fence ? vma->fence->id : -1;
-}
-
void i915_vma_parked(struct intel_gt *gt);
static inline bool i915_vma_is_scanout(const struct i915_vma *vma)
@@ -481,4 +476,6 @@ int i915_vma_module_init(void);
I915_SELFTEST_DECLARE(int i915_vma_get_pages(struct i915_vma *vma));
I915_SELFTEST_DECLARE(void i915_vma_put_pages(struct i915_vma *vma));
+extern const struct intel_display_vma_interface i915_display_vma_interface;
+
#endif
diff --git a/drivers/gpu/drm/i915/i915_vma_resource.c b/drivers/gpu/drm/i915/i915_vma_resource.c
index 53d619ef0c3d..a8f2112ce81f 100644
--- a/drivers/gpu/drm/i915/i915_vma_resource.c
+++ b/drivers/gpu/drm/i915/i915_vma_resource.c
@@ -202,7 +202,7 @@ i915_vma_resource_fence_notify(struct i915_sw_fence *fence,
i915_vma_resource_unbind_work(&vma_res->work);
} else {
INIT_WORK(&vma_res->work, i915_vma_resource_unbind_work);
- queue_work(system_unbound_wq, &vma_res->work);
+ queue_work(system_dfl_wq, &vma_res->work);
}
break;
case FENCE_FREE:
diff --git a/drivers/gpu/drm/i915/intel_clock_gating.c b/drivers/gpu/drm/i915/intel_clock_gating.c
index 7336934bb934..68a6f94f2a37 100644
--- a/drivers/gpu/drm/i915/intel_clock_gating.c
+++ b/drivers/gpu/drm/i915/intel_clock_gating.c
@@ -26,11 +26,13 @@
*/
#include <drm/drm_print.h>
+#include <drm/intel/intel_gmd_misc_regs.h>
+#include <drm/intel/intel_gmd_interrupt_regs.h>
#include "display/i9xx_plane_regs.h"
#include "display/intel_display.h"
#include "display/intel_display_core.h"
-
+#include "display/intel_display_regs.h"
#include "gt/intel_engine_regs.h"
#include "gt/intel_gt.h"
#include "gt/intel_gt_mcr.h"
@@ -452,7 +454,7 @@ static void bdw_init_clock_gating(struct drm_i915_private *i915)
GEN8_FF_DS_REF_CNT_FFME | GEN7_FF_VS_REF_CNT_FFME, 0);
intel_uncore_write(&i915->uncore, RING_PSMI_CTL(RENDER_RING_BASE),
- _MASKED_BIT_ENABLE(GEN8_RC_SEMA_IDLE_MSG_DISABLE));
+ REG_MASKED_FIELD_ENABLE(GEN8_RC_SEMA_IDLE_MSG_DISABLE));
/* WaDisableSDEUnitClockGating:bdw */
intel_uncore_rmw(&i915->uncore, GEN8_UCGCTL6, 0, GEN8_SDEUNIT_CLOCK_GATE_DISABLE);
@@ -517,13 +519,13 @@ static void ivb_init_clock_gating(struct drm_i915_private *i915)
if (INTEL_INFO(i915)->gt == 1)
intel_uncore_write(&i915->uncore, GEN7_ROW_CHICKEN2,
- _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE));
+ REG_MASKED_FIELD_ENABLE(DOP_CLOCK_GATING_DISABLE));
else {
/* must write both registers */
intel_uncore_write(&i915->uncore, GEN7_ROW_CHICKEN2,
- _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE));
+ REG_MASKED_FIELD_ENABLE(DOP_CLOCK_GATING_DISABLE));
intel_uncore_write(&i915->uncore, GEN7_ROW_CHICKEN2_GT2,
- _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE));
+ REG_MASKED_FIELD_ENABLE(DOP_CLOCK_GATING_DISABLE));
}
/*
@@ -557,7 +559,7 @@ static void vlv_init_clock_gating(struct drm_i915_private *i915)
/* WaDisableDopClockGating:vlv */
intel_uncore_write(&i915->uncore, GEN7_ROW_CHICKEN2,
- _MASKED_BIT_ENABLE(DOP_CLOCK_GATING_DISABLE));
+ REG_MASKED_FIELD_ENABLE(DOP_CLOCK_GATING_DISABLE));
/* This is required by WaCatErrorRejectionIssue:vlv */
intel_uncore_rmw(&i915->uncore, GEN7_SQ_CHICKEN_MBCUNIT_CONFIG,
@@ -592,7 +594,7 @@ static void chv_init_clock_gating(struct drm_i915_private *i915)
/* WaDisableSemaphoreAndSyncFlipWait:chv */
intel_uncore_write(&i915->uncore, RING_PSMI_CTL(RENDER_RING_BASE),
- _MASKED_BIT_ENABLE(GEN8_RC_SEMA_IDLE_MSG_DISABLE));
+ REG_MASKED_FIELD_ENABLE(GEN8_RC_SEMA_IDLE_MSG_DISABLE));
/* WaDisableCSUnitClockGating:chv */
intel_uncore_rmw(&i915->uncore, GEN6_UCGCTL1, 0, GEN6_CSUNIT_CLOCK_GATE_DISABLE);
@@ -638,7 +640,7 @@ static void i965gm_init_clock_gating(struct drm_i915_private *i915)
intel_uncore_write16(uncore, DEUC, 0);
intel_uncore_write(uncore,
MI_ARB_STATE,
- _MASKED_BIT_ENABLE(MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE));
+ REG_MASKED_FIELD_ENABLE(MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE));
}
static void i965g_init_clock_gating(struct drm_i915_private *i915)
@@ -650,7 +652,7 @@ static void i965g_init_clock_gating(struct drm_i915_private *i915)
I965_FBC_CLOCK_GATE_DISABLE);
intel_uncore_write(&i915->uncore, RENCLK_GATE_D2, 0);
intel_uncore_write(&i915->uncore, MI_ARB_STATE,
- _MASKED_BIT_ENABLE(MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE));
+ REG_MASKED_FIELD_ENABLE(MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE));
}
static void gen3_init_clock_gating(struct drm_i915_private *i915)
@@ -663,21 +665,21 @@ static void gen3_init_clock_gating(struct drm_i915_private *i915)
if (IS_PINEVIEW(i915))
intel_uncore_write(&i915->uncore, ECOSKPD(RENDER_RING_BASE),
- _MASKED_BIT_ENABLE(ECO_GATING_CX_ONLY));
+ REG_MASKED_FIELD_ENABLE(ECO_GATING_CX_ONLY));
/* IIR "flip pending" means done if this bit is set */
intel_uncore_write(&i915->uncore, ECOSKPD(RENDER_RING_BASE),
- _MASKED_BIT_DISABLE(ECO_FLIP_DONE));
+ REG_MASKED_FIELD_DISABLE(ECO_FLIP_DONE));
/* interrupts should cause a wake up from C3 */
- intel_uncore_write(&i915->uncore, INSTPM, _MASKED_BIT_ENABLE(INSTPM_AGPBUSY_INT_EN));
+ intel_uncore_write(&i915->uncore, INSTPM, REG_MASKED_FIELD_ENABLE(INSTPM_AGPBUSY_INT_EN));
/* On GEN3 we really need to make sure the ARB C3 LP bit is set */
intel_uncore_write(&i915->uncore, MI_ARB_STATE,
- _MASKED_BIT_ENABLE(MI_ARB_C3_LP_WRITE_ENABLE));
+ REG_MASKED_FIELD_ENABLE(MI_ARB_C3_LP_WRITE_ENABLE));
intel_uncore_write(&i915->uncore, MI_ARB_STATE,
- _MASKED_BIT_ENABLE(MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE));
+ REG_MASKED_FIELD_ENABLE(MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE));
}
static void i85x_init_clock_gating(struct drm_i915_private *i915)
@@ -685,11 +687,11 @@ static void i85x_init_clock_gating(struct drm_i915_private *i915)
intel_uncore_write(&i915->uncore, RENCLK_GATE_D1, SV_CLOCK_GATE_DISABLE);
/* interrupts should cause a wake up from C3 */
- intel_uncore_write(&i915->uncore, MI_STATE, _MASKED_BIT_ENABLE(MI_AGPBUSY_INT_EN) |
- _MASKED_BIT_DISABLE(MI_AGPBUSY_830_MODE));
+ intel_uncore_write(&i915->uncore, MI_STATE, REG_MASKED_FIELD_ENABLE(MI_AGPBUSY_INT_EN) |
+ REG_MASKED_FIELD_DISABLE(MI_AGPBUSY_830_MODE));
intel_uncore_write(&i915->uncore, MEM_MODE,
- _MASKED_BIT_ENABLE(MEM_DISPLAY_TRICKLE_FEED_DISABLE));
+ REG_MASKED_FIELD_ENABLE(MEM_DISPLAY_TRICKLE_FEED_DISABLE));
/*
* Have FBC ignore 3D activity since we use software
@@ -699,14 +701,14 @@ static void i85x_init_clock_gating(struct drm_i915_private *i915)
* until a 2D blit occurs.
*/
intel_uncore_write(&i915->uncore, SCPD0,
- _MASKED_BIT_ENABLE(SCPD_FBC_IGNORE_3D));
+ REG_MASKED_FIELD_ENABLE(SCPD_FBC_IGNORE_3D));
}
static void i830_init_clock_gating(struct drm_i915_private *i915)
{
intel_uncore_write(&i915->uncore, MEM_MODE,
- _MASKED_BIT_ENABLE(MEM_DISPLAY_A_TRICKLE_FEED_DISABLE) |
- _MASKED_BIT_ENABLE(MEM_DISPLAY_B_TRICKLE_FEED_DISABLE));
+ REG_MASKED_FIELD_ENABLE(MEM_DISPLAY_A_TRICKLE_FEED_DISABLE) |
+ REG_MASKED_FIELD_ENABLE(MEM_DISPLAY_B_TRICKLE_FEED_DISABLE));
}
void intel_clock_gating_init(struct drm_device *drm)
diff --git a/drivers/gpu/drm/i915/intel_gvt_mmio_table.c b/drivers/gpu/drm/i915/intel_gvt_mmio_table.c
index 478d00f89a4b..ae42818ab6e0 100644
--- a/drivers/gpu/drm/i915/intel_gvt_mmio_table.c
+++ b/drivers/gpu/drm/i915/intel_gvt_mmio_table.c
@@ -3,6 +3,11 @@
* Copyright © 2020 Intel Corporation
*/
+#include <drm/intel/intel_pcode_regs.h>
+#include <drm/intel/intel_gmd_misc_regs.h>
+
+#include <drm/intel/intel_gmd_interrupt_regs.h>
+
#include "display/bxt_dpio_phy_regs.h"
#include "display/i9xx_plane_regs.h"
#include "display/i9xx_wm_regs.h"
@@ -11,12 +16,12 @@
#include "display/intel_color_regs.h"
#include "display/intel_crt_regs.h"
#include "display/intel_cursor_regs.h"
-#include "display/intel_display_core.h"
+#include "display/intel_display_limits.h"
#include "display/intel_display_regs.h"
-#include "display/intel_display_types.h"
#include "display/intel_dmc_regs.h"
#include "display/intel_dp_aux_regs.h"
#include "display/intel_dpio_phy.h"
+#include "display/intel_fbc.h"
#include "display/intel_fbc_regs.h"
#include "display/intel_fdi_regs.h"
#include "display/intel_lvds_regs.h"
@@ -32,6 +37,7 @@
#include "gt/intel_engine_regs.h"
#include "gt/intel_gt_regs.h"
+#include "gvt/display_helpers.h"
#include "gvt/reg.h"
#include "i915_drv.h"
diff --git a/drivers/gpu/drm/i915/intel_pcode.c b/drivers/gpu/drm/i915/intel_pcode.c
index 756652b8ec97..c07d48fc1b35 100644
--- a/drivers/gpu/drm/i915/intel_pcode.c
+++ b/drivers/gpu/drm/i915/intel_pcode.c
@@ -4,6 +4,8 @@
*/
#include <drm/drm_print.h>
+#include <drm/intel/display_parent_interface.h>
+#include <drm/intel/intel_pcode_regs.h>
#include "i915_drv.h"
#include "i915_reg.h"
@@ -276,26 +278,31 @@ int snb_pcode_write_p(struct intel_uncore *uncore, u32 mbcmd, u32 p1, u32 p2, u3
return err;
}
-/* Helpers with drm device */
-int intel_pcode_read(struct drm_device *drm, u32 mbox, u32 *val, u32 *val1)
+static int intel_pcode_read(struct drm_device *drm, u32 mbox, u32 *val, u32 *val1)
{
struct drm_i915_private *i915 = to_i915(drm);
return snb_pcode_read(&i915->uncore, mbox, val, val1);
}
-int intel_pcode_write_timeout(struct drm_device *drm, u32 mbox, u32 val, int timeout_ms)
+static int intel_pcode_write_timeout(struct drm_device *drm, u32 mbox, u32 val, int timeout_ms)
{
struct drm_i915_private *i915 = to_i915(drm);
return snb_pcode_write_timeout(&i915->uncore, mbox, val, timeout_ms);
}
-int intel_pcode_request(struct drm_device *drm, u32 mbox, u32 request,
- u32 reply_mask, u32 reply, int timeout_base_ms)
+static int intel_pcode_request(struct drm_device *drm, u32 mbox, u32 request,
+ u32 reply_mask, u32 reply, int timeout_base_ms)
{
struct drm_i915_private *i915 = to_i915(drm);
return skl_pcode_request(&i915->uncore, mbox, request, reply_mask, reply,
timeout_base_ms);
}
+
+const struct intel_display_pcode_interface i915_display_pcode_interface = {
+ .read = intel_pcode_read,
+ .write = intel_pcode_write_timeout,
+ .request = intel_pcode_request,
+};
diff --git a/drivers/gpu/drm/i915/intel_pcode.h b/drivers/gpu/drm/i915/intel_pcode.h
index c91a821a88d4..19795ea8172e 100644
--- a/drivers/gpu/drm/i915/intel_pcode.h
+++ b/drivers/gpu/drm/i915/intel_pcode.h
@@ -27,13 +27,6 @@ int intel_pcode_init(struct intel_uncore *uncore);
int snb_pcode_read_p(struct intel_uncore *uncore, u32 mbcmd, u32 p1, u32 p2, u32 *val);
int snb_pcode_write_p(struct intel_uncore *uncore, u32 mbcmd, u32 p1, u32 p2, u32 val);
-/* Helpers with drm device */
-int intel_pcode_read(struct drm_device *drm, u32 mbox, u32 *val, u32 *val1);
-int intel_pcode_write_timeout(struct drm_device *drm, u32 mbox, u32 val, int timeout_ms);
-#define intel_pcode_write(drm, mbox, val) \
- intel_pcode_write_timeout((drm), (mbox), (val), 1)
-
-int intel_pcode_request(struct drm_device *drm, u32 mbox, u32 request,
- u32 reply_mask, u32 reply, int timeout_base_ms);
+extern const struct intel_display_pcode_interface i915_display_pcode_interface;
#endif /* _INTEL_PCODE_H */
diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c
index bccedd59a114..5b698d4d7a7f 100644
--- a/drivers/gpu/drm/i915/intel_uncore.c
+++ b/drivers/gpu/drm/i915/intel_uncore.c
@@ -132,8 +132,8 @@ intel_uncore_forcewake_domain_to_str(const enum forcewake_domain_id id)
}
#define fw_ack(d) readl((d)->reg_ack)
-#define fw_set(d, val) writel(_MASKED_BIT_ENABLE((val)), (d)->reg_set)
-#define fw_clear(d, val) writel(_MASKED_BIT_DISABLE((val)), (d)->reg_set)
+#define fw_set(d, val) writel(REG_MASKED_FIELD_ENABLE((val)), (d)->reg_set)
+#define fw_clear(d, val) writel(REG_MASKED_FIELD_DISABLE((val)), (d)->reg_set)
static inline void
fw_domain_reset(const struct intel_uncore_forcewake_domain *d)
diff --git a/drivers/gpu/drm/i915/pxp/intel_pxp.c b/drivers/gpu/drm/i915/pxp/intel_pxp.c
index f7ed4e18a3ab..3d7f045f662d 100644
--- a/drivers/gpu/drm/i915/pxp/intel_pxp.c
+++ b/drivers/gpu/drm/i915/pxp/intel_pxp.c
@@ -66,8 +66,8 @@ bool intel_pxp_is_active(const struct intel_pxp *pxp)
static void kcr_pxp_set_status(const struct intel_pxp *pxp, bool enable)
{
- u32 val = enable ? _MASKED_BIT_ENABLE(KCR_INIT_ALLOW_DISPLAY_ME_WRITES) :
- _MASKED_BIT_DISABLE(KCR_INIT_ALLOW_DISPLAY_ME_WRITES);
+ u32 val = enable ? REG_MASKED_FIELD_ENABLE(KCR_INIT_ALLOW_DISPLAY_ME_WRITES) :
+ REG_MASKED_FIELD_DISABLE(KCR_INIT_ALLOW_DISPLAY_ME_WRITES);
intel_uncore_write(pxp->ctrl_gt->uncore, KCR_INIT(pxp->kcr_base), val);
}
@@ -278,7 +278,7 @@ static void pxp_queue_termination(struct intel_pxp *pxp)
spin_lock_irq(gt->irq_lock);
intel_pxp_mark_termination_in_progress(pxp);
pxp->session_events |= PXP_TERMINATION_REQUEST;
- queue_work(system_unbound_wq, &pxp->session_work);
+ queue_work(system_dfl_wq, &pxp->session_work);
spin_unlock_irq(gt->irq_lock);
}
diff --git a/drivers/gpu/drm/i915/pxp/intel_pxp_irq.c b/drivers/gpu/drm/i915/pxp/intel_pxp_irq.c
index d81750b9bdda..735325e828bc 100644
--- a/drivers/gpu/drm/i915/pxp/intel_pxp_irq.c
+++ b/drivers/gpu/drm/i915/pxp/intel_pxp_irq.c
@@ -48,7 +48,7 @@ void intel_pxp_irq_handler(struct intel_pxp *pxp, u16 iir)
pxp->session_events |= PXP_TERMINATION_COMPLETE | PXP_EVENT_TYPE_IRQ;
if (pxp->session_events)
- queue_work(system_unbound_wq, &pxp->session_work);
+ queue_work(system_dfl_wq, &pxp->session_work);
}
static inline void __pxp_set_interrupts(struct intel_gt *gt, u32 interrupts)
diff --git a/drivers/gpu/drm/i915/selftests/i915_sw_fence.c b/drivers/gpu/drm/i915/selftests/i915_sw_fence.c
index 6c376338bb37..6a305322e30d 100644
--- a/drivers/gpu/drm/i915/selftests/i915_sw_fence.c
+++ b/drivers/gpu/drm/i915/selftests/i915_sw_fence.c
@@ -526,7 +526,7 @@ static int test_ipc(void *arg)
struct workqueue_struct *wq;
int ret = 0;
- wq = alloc_workqueue("i1915-selftest", 0, 0);
+ wq = alloc_workqueue("i1915-selftest", WQ_PERCPU, 0);
if (wq == NULL)
return -ENOMEM;
diff --git a/drivers/gpu/drm/i915/selftests/igt_mmap.c b/drivers/gpu/drm/i915/selftests/igt_mmap.c
index e920a461bd36..106d5c0dfcbc 100644
--- a/drivers/gpu/drm/i915/selftests/igt_mmap.c
+++ b/drivers/gpu/drm/i915/selftests/igt_mmap.c
@@ -9,14 +9,14 @@
#include "i915_drv.h"
#include "igt_mmap.h"
-unsigned long igt_mmap_offset(struct drm_i915_private *i915,
- u64 offset,
- unsigned long size,
- unsigned long prot,
- unsigned long flags)
+unsigned long igt_mmap_offset_with_file(struct drm_i915_private *i915,
+ u64 offset,
+ unsigned long size,
+ unsigned long prot,
+ unsigned long flags,
+ struct file *file)
{
struct drm_vma_offset_node *node;
- struct file *file;
unsigned long addr;
int err;
@@ -31,22 +31,35 @@ unsigned long igt_mmap_offset(struct drm_i915_private *i915,
return -ENOENT;
}
- /* Pretend to open("/dev/dri/card0") */
- file = mock_drm_getfile(i915->drm.primary, O_RDWR);
- if (IS_ERR(file))
- return PTR_ERR(file);
-
err = drm_vma_node_allow(node, file->private_data);
if (err) {
- addr = err;
- goto out_file;
+ return err;
}
addr = vm_mmap(file, 0, drm_vma_node_size(node) << PAGE_SHIFT,
prot, flags, drm_vma_node_offset_addr(node));
drm_vma_node_revoke(node, file->private_data);
-out_file:
+
+ return addr;
+}
+
+unsigned long igt_mmap_offset(struct drm_i915_private *i915,
+ u64 offset,
+ unsigned long size,
+ unsigned long prot,
+ unsigned long flags)
+{
+ struct file *file;
+ unsigned long addr;
+
+ /* Pretend to open("/dev/dri/card0") */
+ file = mock_drm_getfile(i915->drm.primary, O_RDWR);
+ if (IS_ERR(file))
+ return PTR_ERR(file);
+
+ addr = igt_mmap_offset_with_file(i915, offset, size, prot, flags, file);
fput(file);
+
return addr;
}
diff --git a/drivers/gpu/drm/i915/selftests/igt_mmap.h b/drivers/gpu/drm/i915/selftests/igt_mmap.h
index acbe34d81a6d..7b177b44cd3c 100644
--- a/drivers/gpu/drm/i915/selftests/igt_mmap.h
+++ b/drivers/gpu/drm/i915/selftests/igt_mmap.h
@@ -11,6 +11,7 @@
struct drm_i915_private;
struct drm_vma_offset_node;
+struct file;
unsigned long igt_mmap_offset(struct drm_i915_private *i915,
u64 offset,
@@ -18,4 +19,11 @@ unsigned long igt_mmap_offset(struct drm_i915_private *i915,
unsigned long prot,
unsigned long flags);
+unsigned long igt_mmap_offset_with_file(struct drm_i915_private *i915,
+ u64 offset,
+ unsigned long size,
+ unsigned long prot,
+ unsigned long flags,
+ struct file *file);
+
#endif /* IGT_MMAP_H */
diff --git a/drivers/gpu/drm/i915/selftests/mock_gem_device.c b/drivers/gpu/drm/i915/selftests/mock_gem_device.c
index 210b9f8f7b61..27dc0e40a8d7 100644
--- a/drivers/gpu/drm/i915/selftests/mock_gem_device.c
+++ b/drivers/gpu/drm/i915/selftests/mock_gem_device.c
@@ -223,7 +223,7 @@ struct drm_i915_private *mock_gem_device(void)
if (!i915->wq)
goto err_drv;
- i915->unordered_wq = alloc_workqueue("mock-unordered", 0, 0);
+ i915->unordered_wq = alloc_workqueue("mock-unordered", WQ_PERCPU, 0);
if (!i915->unordered_wq)
goto err_wq;
diff --git a/drivers/gpu/drm/i915/vlv_suspend.c b/drivers/gpu/drm/i915/vlv_suspend.c
index dac4b9bac743..3612b03cabd1 100644
--- a/drivers/gpu/drm/i915/vlv_suspend.c
+++ b/drivers/gpu/drm/i915/vlv_suspend.c
@@ -7,6 +7,7 @@
#include <linux/kernel.h>
#include <drm/drm_print.h>
+#include <drm/intel/intel_gmd_interrupt_regs.h>
#include "gt/intel_gt_regs.h"
diff --git a/drivers/gpu/drm/imagination/pvr_gem.c b/drivers/gpu/drm/imagination/pvr_gem.c
index 686a3fe22986..d8660d6a8e01 100644
--- a/drivers/gpu/drm/imagination/pvr_gem.c
+++ b/drivers/gpu/drm/imagination/pvr_gem.c
@@ -25,7 +25,10 @@
static void pvr_gem_object_free(struct drm_gem_object *obj)
{
- drm_gem_shmem_object_free(obj);
+ struct drm_gem_shmem_object *shmem_obj = to_drm_gem_shmem_obj(obj);
+
+ shmem_obj->pages_mark_dirty_on_put = true;
+ drm_gem_shmem_free(shmem_obj);
}
static struct dma_buf *pvr_gem_export(struct drm_gem_object *obj, int flags)
@@ -363,7 +366,6 @@ pvr_gem_object_create(struct pvr_device *pvr_dev, size_t size, u64 flags)
if (IS_ERR(shmem_obj))
return ERR_CAST(shmem_obj);
- shmem_obj->pages_mark_dirty_on_put = true;
shmem_obj->map_wc = !(flags & PVR_BO_CPU_CACHED);
pvr_obj = shmem_gem_to_pvr_gem(shmem_obj);
pvr_obj->flags = flags;
diff --git a/drivers/gpu/drm/loongson/lsdc_drv.c b/drivers/gpu/drm/loongson/lsdc_drv.c
index abf5bf68eec2..1ece1ea42f78 100644
--- a/drivers/gpu/drm/loongson/lsdc_drv.c
+++ b/drivers/gpu/drm/loongson/lsdc_drv.c
@@ -13,6 +13,7 @@
#include <drm/drm_drv.h>
#include <drm/drm_fbdev_ttm.h>
#include <drm/drm_gem_framebuffer_helper.h>
+#include <drm/drm_gem_ttm_helper.h>
#include <drm/drm_ioctl.h>
#include <drm/drm_modeset_helper.h>
#include <drm/drm_print.h>
@@ -45,7 +46,7 @@ static const struct drm_driver lsdc_drm_driver = {
.debugfs_init = lsdc_debugfs_init,
.dumb_create = lsdc_dumb_create,
- .dumb_map_offset = lsdc_dumb_map_offset,
+ .dumb_map_offset = drm_gem_ttm_dumb_map_offset,
.gem_prime_import_sg_table = lsdc_prime_import_sg_table,
DRM_FBDEV_TTM_DRIVER_OPS,
};
diff --git a/drivers/gpu/drm/loongson/lsdc_gem.c b/drivers/gpu/drm/loongson/lsdc_gem.c
index 6372db2d3093..2fb03487c983 100644
--- a/drivers/gpu/drm/loongson/lsdc_gem.c
+++ b/drivers/gpu/drm/loongson/lsdc_gem.c
@@ -234,21 +234,6 @@ int lsdc_dumb_create(struct drm_file *file, struct drm_device *ddev,
return 0;
}
-int lsdc_dumb_map_offset(struct drm_file *filp, struct drm_device *ddev,
- u32 handle, uint64_t *offset)
-{
- struct drm_gem_object *gobj;
-
- gobj = drm_gem_object_lookup(filp, handle);
- if (!gobj)
- return -ENOENT;
-
- *offset = drm_vma_node_offset_addr(&gobj->vma_node);
-
- drm_gem_object_put(gobj);
-
- return 0;
-}
void lsdc_gem_init(struct drm_device *ddev)
{
diff --git a/drivers/gpu/drm/loongson/lsdc_gem.h b/drivers/gpu/drm/loongson/lsdc_gem.h
index 92cbb10e6e13..96729b3eebec 100644
--- a/drivers/gpu/drm/loongson/lsdc_gem.h
+++ b/drivers/gpu/drm/loongson/lsdc_gem.h
@@ -14,10 +14,6 @@ lsdc_prime_import_sg_table(struct drm_device *ddev,
struct dma_buf_attachment *attach,
struct sg_table *sg);
-int lsdc_dumb_map_offset(struct drm_file *file,
- struct drm_device *dev,
- u32 handle,
- uint64_t *offset);
int lsdc_dumb_create(struct drm_file *file,
struct drm_device *ddev,
diff --git a/drivers/gpu/drm/mxsfb/lcdif_drv.c b/drivers/gpu/drm/mxsfb/lcdif_drv.c
index fcb2a7517377..47da1d9336b9 100644
--- a/drivers/gpu/drm/mxsfb/lcdif_drv.c
+++ b/drivers/gpu/drm/mxsfb/lcdif_drv.c
@@ -149,15 +149,17 @@ static int lcdif_load(struct drm_device *drm)
lcdif->clk = devm_clk_get(drm->dev, "pix");
if (IS_ERR(lcdif->clk))
- return PTR_ERR(lcdif->clk);
+ return dev_err_probe(drm->dev, PTR_ERR(lcdif->clk), "Failed to get pix clock\n");
lcdif->clk_axi = devm_clk_get(drm->dev, "axi");
if (IS_ERR(lcdif->clk_axi))
- return PTR_ERR(lcdif->clk_axi);
+ return dev_err_probe(drm->dev, PTR_ERR(lcdif->clk_axi),
+ "Failed to get axi clock\n");
lcdif->clk_disp_axi = devm_clk_get(drm->dev, "disp_axi");
if (IS_ERR(lcdif->clk_disp_axi))
- return PTR_ERR(lcdif->clk_disp_axi);
+ return dev_err_probe(drm->dev, PTR_ERR(lcdif->clk_disp_axi),
+ "Failed to get disp_axi clock\n");
platform_set_drvdata(pdev, drm);
diff --git a/drivers/gpu/drm/panel/Kconfig b/drivers/gpu/drm/panel/Kconfig
index 79264f7bbd0e..a99f2e2a49fe 100644
--- a/drivers/gpu/drm/panel/Kconfig
+++ b/drivers/gpu/drm/panel/Kconfig
@@ -443,6 +443,15 @@ config DRM_PANEL_LG_SW43408
pixel. It provides a MIPI DSI interface to the host and has a
built-in LED backlight.
+config DRM_PANEL_LXD_M9189A
+ tristate "LXD M9189A MIPI-DSI LCD panel"
+ depends on OF
+ depends on DRM_MIPI_DSI
+ depends on BACKLIGHT_CLASS_DEVICE
+ help
+ Say Y if you want to enable support for the LXD M9189A 4-Lane
+ 1024x600 MIPI DSI panel.
+
config DRM_PANEL_MAGNACHIP_D53E6EA8966
tristate "Magnachip D53E6EA8966 DSI panel"
depends on OF && SPI
@@ -465,6 +474,18 @@ config DRM_PANEL_MANTIX_MLAF057WE51
has a resolution of 720x1440 pixels, a built in backlight and touch
controller.
+config DRM_PANEL_MOTOROLA_MOT
+ tristate "Atrix 4G and Droid X2 540x960 DSI video mode panel"
+ depends on OF
+ depends on DRM_MIPI_DSI
+ depends on BACKLIGHT_CLASS_DEVICE
+ select VIDEOMODE_HELPERS
+ help
+ Say Y here if you want to enable support for the LCD panel module
+ for Motorola Atrix 4G or Droid X2. Exact panel vendor and model are
+ unknown. The panel has a 540x960 resolution and uses 24 bit RGB per
+ pixel.
+
config DRM_PANEL_NEC_NL8048HL11
tristate "NEC NL8048HL11 RGB panel"
depends on GPIOLIB && OF && SPI
@@ -554,6 +575,15 @@ config DRM_PANEL_NOVATEK_NT36672E
LCD panel module. The panel has a resolution of 1080x2408 and uses 24 bit
RGB per pixel.
+config DRM_PANEL_NOVATEK_NT37700F
+ tristate "Novatek NT37700F DSI panel"
+ depends on OF
+ depends on DRM_MIPI_DSI
+ depends on BACKLIGHT_CLASS_DEVICE
+ help
+ Say Y here if you want to enable support for Novatek NT37700F DSI
+ panel module. The panel has a resolution of 1080x2160.
+
config DRM_PANEL_NOVATEK_NT37801
tristate "Novatek NT37801/NT37810 AMOLED DSI panel"
depends on OF
@@ -689,6 +719,7 @@ config DRM_PANEL_RENESAS_R61307
depends on OF
depends on DRM_MIPI_DSI
depends on BACKLIGHT_CLASS_DEVICE
+ select VIDEOMODE_HELPERS
help
Say Y here if you want to enable support for KOE tx13d100vm0eaa
IPS-LCD module with Renesas R69328 IC. The panel has a 1024x768
@@ -702,6 +733,7 @@ config DRM_PANEL_RENESAS_R69328
depends on OF
depends on DRM_MIPI_DSI
depends on BACKLIGHT_CLASS_DEVICE
+ select VIDEOMODE_HELPERS
help
Say Y here if you want to enable support for JDI dx12d100vm0eaa
IPS-LCD module with Renesas R69328 IC. The panel has a 720x1280
diff --git a/drivers/gpu/drm/panel/Makefile b/drivers/gpu/drm/panel/Makefile
index aeffaa95666d..3336a2c0cd86 100644
--- a/drivers/gpu/drm/panel/Makefile
+++ b/drivers/gpu/drm/panel/Makefile
@@ -44,7 +44,9 @@ obj-$(CONFIG_DRM_PANEL_LG_LB035Q02) += panel-lg-lb035q02.o
obj-$(CONFIG_DRM_PANEL_LG_LD070WX3) += panel-lg-ld070wx3.o
obj-$(CONFIG_DRM_PANEL_LG_LG4573) += panel-lg-lg4573.o
obj-$(CONFIG_DRM_PANEL_LG_SW43408) += panel-lg-sw43408.o
+obj-$(CONFIG_DRM_PANEL_LXD_M9189A) += panel-lxd-m9189a.o
obj-$(CONFIG_DRM_PANEL_MAGNACHIP_D53E6EA8966) += panel-magnachip-d53e6ea8966.o
+obj-$(CONFIG_DRM_PANEL_MOTOROLA_MOT) += panel-motorola-mot.o
obj-$(CONFIG_DRM_PANEL_NEC_NL8048HL11) += panel-nec-nl8048hl11.o
obj-$(CONFIG_DRM_PANEL_NEWVISION_NV3051D) += panel-newvision-nv3051d.o
obj-$(CONFIG_DRM_PANEL_NEWVISION_NV3052C) += panel-newvision-nv3052c.o
@@ -54,6 +56,7 @@ obj-$(CONFIG_DRM_PANEL_NOVATEK_NT35950) += panel-novatek-nt35950.o
obj-$(CONFIG_DRM_PANEL_NOVATEK_NT36523) += panel-novatek-nt36523.o
obj-$(CONFIG_DRM_PANEL_NOVATEK_NT36672A) += panel-novatek-nt36672a.o
obj-$(CONFIG_DRM_PANEL_NOVATEK_NT36672E) += panel-novatek-nt36672e.o
+obj-$(CONFIG_DRM_PANEL_NOVATEK_NT37700F) += panel-novatek-nt37700f.o
obj-$(CONFIG_DRM_PANEL_NOVATEK_NT37801) += panel-novatek-nt37801.o
obj-$(CONFIG_DRM_PANEL_NOVATEK_NT39016) += panel-novatek-nt39016.o
obj-$(CONFIG_DRM_PANEL_MANTIX_MLAF057WE51) += panel-mantix-mlaf057we51.o
diff --git a/drivers/gpu/drm/panel/panel-ilitek-ili9882t.c b/drivers/gpu/drm/panel/panel-ilitek-ili9882t.c
index 8b2bfb7d3638..5f4e0d82ee67 100644
--- a/drivers/gpu/drm/panel/panel-ilitek-ili9882t.c
+++ b/drivers/gpu/drm/panel/panel-ilitek-ili9882t.c
@@ -592,7 +592,7 @@ static int ili9882t_unprepare(struct drm_panel *panel)
{
struct ili9882t *ili = to_ili9882t(panel);
- gpiod_set_value(ili->enable_gpio, 0);
+ gpiod_set_value_cansleep(ili->enable_gpio, 0);
usleep_range(1000, 2000);
regulator_disable(ili->avee);
regulator_disable(ili->avdd);
@@ -608,7 +608,7 @@ static int ili9882t_prepare(struct drm_panel *panel)
struct ili9882t *ili = to_ili9882t(panel);
int ret;
- gpiod_set_value(ili->enable_gpio, 0);
+ gpiod_set_value_cansleep(ili->enable_gpio, 0);
usleep_range(1000, 1500);
ret = regulator_enable(ili->pp3300);
@@ -638,11 +638,11 @@ static int ili9882t_prepare(struct drm_panel *panel)
}
usleep_range(1000, 2000);
- gpiod_set_value(ili->enable_gpio, 1);
+ gpiod_set_value_cansleep(ili->enable_gpio, 1);
usleep_range(1000, 2000);
- gpiod_set_value(ili->enable_gpio, 0);
+ gpiod_set_value_cansleep(ili->enable_gpio, 0);
msleep(50);
- gpiod_set_value(ili->enable_gpio, 1);
+ gpiod_set_value_cansleep(ili->enable_gpio, 1);
usleep_range(6000, 10000);
ret = ili->desc->init(ili);
@@ -652,7 +652,7 @@ static int ili9882t_prepare(struct drm_panel *panel)
return 0;
poweroff:
- gpiod_set_value(ili->enable_gpio, 0);
+ gpiod_set_value_cansleep(ili->enable_gpio, 0);
regulator_disable(ili->avee);
poweroffavdd:
regulator_disable(ili->avdd);
@@ -793,7 +793,7 @@ static int ili9882t_add(struct ili9882t *ili)
return PTR_ERR(ili->enable_gpio);
}
- gpiod_set_value(ili->enable_gpio, 0);
+ gpiod_set_value_cansleep(ili->enable_gpio, 0);
err = of_drm_get_panel_orientation(dev->of_node, &ili->orientation);
if (err < 0) {
diff --git a/drivers/gpu/drm/panel/panel-jadard-jd9365da-h3.c b/drivers/gpu/drm/panel/panel-jadard-jd9365da-h3.c
index 5386a06fcd08..c33c611e03c0 100644
--- a/drivers/gpu/drm/panel/panel-jadard-jd9365da-h3.c
+++ b/drivers/gpu/drm/panel/panel-jadard-jd9365da-h3.c
@@ -1366,6 +1366,246 @@ static const struct jadard_panel_desc anbernic_rgds_display_desc = {
MIPI_DSI_CLOCK_NON_CONTINUOUS | MIPI_DSI_MODE_LPM,
};
+static int taiguan_xti05101_01a_init_cmds(struct jadard *jadard)
+{
+ struct mipi_dsi_multi_context dsi_ctx = { .dsi = jadard->dsi };
+
+ jd9365da_switch_page(&dsi_ctx, 0x00);
+ jadard_enable_standard_cmds(&dsi_ctx);
+
+ jd9365da_switch_page(&dsi_ctx, 0x01);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x0c, 0x74);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x17, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x18, 0xd7);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x19, 0x01);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x1a, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x1b, 0xd7);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x1c, 0x01);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x24, 0xfe);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x37, 0x19);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x35, 0x28);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x38, 0x05);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x39, 0x08);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x3a, 0x12);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x3c, 0x78);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x3d, 0xff);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x3e, 0xff);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x3f, 0x7f);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x40, 0x06);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x41, 0xa0);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x43, 0x1e);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x44, 0x0b);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x55, 0x02);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x57, 0x6a);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x59, 0x0a);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x5a, 0x2e);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x5b, 0x1a);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x5c, 0x15);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x5d, 0x7f);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x5e, 0x58);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x5f, 0x46);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x60, 0x39);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x61, 0x35);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x62, 0x27);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x63, 0x2b);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x64, 0x16);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x65, 0x30);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x66, 0x2e);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x67, 0x2f);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x68, 0x4d);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x69, 0x3c);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x6a, 0x43);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x6b, 0x36);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x6c, 0x31);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x6d, 0x24);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x6e, 0x14);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x6f, 0x02);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x70, 0x7f);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x71, 0x58);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x72, 0x46);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x73, 0x39);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x74, 0x35);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x75, 0x27);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x76, 0x2b);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x77, 0x16);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x78, 0x30);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x79, 0x2e);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x7a, 0x2f);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x7b, 0x4d);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x7c, 0x3c);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x7d, 0x43);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x7e, 0x36);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x7f, 0x31);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x80, 0x24);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x81, 0x14);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x82, 0x02);
+
+ jd9365da_switch_page(&dsi_ctx, 0x02);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x00, 0x52);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x01, 0x5f);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x02, 0x5f);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x03, 0x50);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x04, 0x77);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x05, 0x57);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x06, 0x5f);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x07, 0x4e);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x08, 0x4c);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x09, 0x5f);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x0a, 0x4a);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x0b, 0x48);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x0c, 0x5f);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x0d, 0x46);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x0e, 0x44);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x0f, 0x40);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x10, 0x5f);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x11, 0x5f);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x12, 0x5f);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x13, 0x5f);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x14, 0x5f);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x15, 0x5f);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x16, 0x53);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x17, 0x5f);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x18, 0x5f);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x19, 0x51);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x1a, 0x77);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x1b, 0x57);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x1c, 0x5f);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x1d, 0x4f);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x1e, 0x4d);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x1f, 0x5f);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x20, 0x4b);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x21, 0x49);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x22, 0x5f);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x23, 0x47);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x24, 0x45);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x25, 0x41);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x26, 0x5f);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x27, 0x5f);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x28, 0x5f);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x29, 0x5f);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x2a, 0x5f);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x2b, 0x5f);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x2c, 0x13);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x2d, 0x1f);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x2e, 0x1f);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x2f, 0x01);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x30, 0x17);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x31, 0x17);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x32, 0x1f);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x33, 0x0d);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x34, 0x0f);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x35, 0x1f);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x36, 0x05);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x37, 0x07);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x38, 0x1f);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x39, 0x09);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x3a, 0x0b);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x3b, 0x11);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x3c, 0x1f);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x3d, 0x1f);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x3e, 0x1f);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x3f, 0x1f);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x40, 0x1f);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x41, 0x1f);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x42, 0x12);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x43, 0x1f);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x44, 0x1f);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x45, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x46, 0x17);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x47, 0x17);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x48, 0x1f);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x49, 0x0c);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x4a, 0x0e);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x4b, 0x1f);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x4c, 0x04);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x4d, 0x06);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x4e, 0x1f);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x4f, 0x08);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x50, 0x0a);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x51, 0x10);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x52, 0x1f);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x53, 0x1f);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x54, 0x1f);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x55, 0x1f);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x56, 0x1f);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x57, 0x1f);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x58, 0x40);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x5b, 0x10);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x5c, 0x06);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x5d, 0x40);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x5e, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x5f, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x60, 0x40);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x61, 0x03);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x62, 0x04);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x63, 0x6c);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x64, 0x6c);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x65, 0x75);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x66, 0x08);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x67, 0xb4);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x68, 0x08);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x69, 0x6c);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x6a, 0x6c);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x6b, 0x0c);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x6d, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x6e, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x6f, 0x88);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x75, 0xbb);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x76, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x77, 0x05);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x78, 0x2a);
+
+ jd9365da_switch_page(&dsi_ctx, 0x04);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x00, 0x0e);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x02, 0xb3);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x09, 0x61);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x0e, 0x48);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x2b, 0x0f);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x37, 0x58);
+
+ jd9365da_switch_page(&dsi_ctx, 0x00);
+
+ mipi_dsi_dcs_exit_sleep_mode_multi(&dsi_ctx);
+
+ mipi_dsi_msleep(&dsi_ctx, 120);
+
+ mipi_dsi_dcs_set_display_on_multi(&dsi_ctx);
+
+ mipi_dsi_msleep(&dsi_ctx, 20);
+
+ return dsi_ctx.accum_err;
+}
+
+static const struct jadard_panel_desc taiguan_xti05101_01a_desc = {
+ .mode = {
+ .clock = (800 + 24 + 24 + 24) * (1280 + 30 + 4 + 8) * 60 / 1000,
+
+ .hdisplay = 800,
+ .hsync_start = 800 + 24,
+ .hsync_end = 800 + 24 + 24,
+ .htotal = 800 + 24 + 24 + 24,
+
+ .vdisplay = 1280,
+ .vsync_start = 1280 + 30,
+ .vsync_end = 1280 + 30 + 4,
+ .vtotal = 1280 + 30 + 4 + 8,
+
+ .width_mm = 135,
+ .height_mm = 216,
+ .type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED,
+ },
+ .lanes = 4,
+ .format = MIPI_DSI_FMT_RGB888,
+ .init = taiguan_xti05101_01a_init_cmds,
+ .lp11_before_reset = true,
+ .reset_before_power_off_vcioo = true,
+ .vcioo_to_lp11_delay_ms = 5,
+ .lp11_to_reset_delay_ms = 10,
+ .backlight_off_to_display_off_delay_ms = 3,
+ .display_off_to_enter_sleep_delay_ms = 50,
+ .enter_sleep_to_reset_down_delay_ms = 100,
+};
+
static int jadard_dsi_probe(struct mipi_dsi_device *dsi)
{
struct device *dev = &dsi->dev;
@@ -1463,6 +1703,10 @@ static const struct of_device_id jadard_of_match[] = {
.compatible = "radxa,display-8hd-ad002",
.data = &radxa_display_8hd_ad002_desc
},
+ {
+ .compatible = "taiguanck,xti05101-01a",
+ .data = &taiguan_xti05101_01a_desc
+ },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, jadard_of_match);
diff --git a/drivers/gpu/drm/panel/panel-lxd-m9189a.c b/drivers/gpu/drm/panel/panel-lxd-m9189a.c
new file mode 100644
index 000000000000..68019e1e43a9
--- /dev/null
+++ b/drivers/gpu/drm/panel/panel-lxd-m9189a.c
@@ -0,0 +1,243 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Generated with linux-mdss-dsi-panel-driver-generator from vendor device tree.
+ * Copyright (c) 2024 Luca Weiss <luca.weiss@fairphone.com>
+ */
+
+#include <linux/delay.h>
+#include <linux/gpio/consumer.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/regulator/consumer.h>
+#include <linux/types.h>
+
+#include <drm/drm_mipi_dsi.h>
+#include <drm/drm_modes.h>
+#include <drm/drm_panel.h>
+#include <drm/drm_probe_helper.h>
+
+/* Manufacturer specific DSI commands */
+#define EK79007AD3_GAMMA1 0x80
+#define EK79007AD3_GAMMA2 0x81
+#define EK79007AD3_GAMMA3 0x82
+#define EK79007AD3_GAMMA4 0x83
+#define EK79007AD3_GAMMA5 0x84
+#define EK79007AD3_GAMMA6 0x85
+#define EK79007AD3_GAMMA7 0x86
+#define EK79007AD3_PANEL_CTRL3 0xB2
+
+struct m9189_panel {
+ struct drm_panel panel;
+ struct mipi_dsi_device *dsi;
+ struct regulator *supply;
+ struct gpio_desc *reset_gpio;
+ struct gpio_desc *standby_gpio;
+};
+
+static inline struct m9189_panel *to_m9189_panel(struct drm_panel *panel)
+{
+ return container_of(panel, struct m9189_panel, panel);
+}
+
+static void m9189_reset(struct m9189_panel *m9189)
+{
+ gpiod_set_value_cansleep(m9189->reset_gpio, 0);
+ msleep(20);
+ gpiod_set_value_cansleep(m9189->reset_gpio, 1);
+ msleep(30);
+ gpiod_set_value_cansleep(m9189->reset_gpio, 0);
+ msleep(55);
+}
+
+static int m9189_on(struct m9189_panel *m9189)
+{
+ struct mipi_dsi_multi_context ctx = { .dsi = m9189->dsi };
+
+ ctx.dsi->mode_flags |= MIPI_DSI_MODE_LPM;
+
+ /* Gamma 2.2 */
+ mipi_dsi_dcs_write_seq_multi(&ctx, EK79007AD3_GAMMA1, 0x48);
+ mipi_dsi_dcs_write_seq_multi(&ctx, EK79007AD3_GAMMA2, 0xB8);
+ mipi_dsi_dcs_write_seq_multi(&ctx, EK79007AD3_GAMMA3, 0x88);
+ mipi_dsi_dcs_write_seq_multi(&ctx, EK79007AD3_GAMMA4, 0x88);
+ mipi_dsi_dcs_write_seq_multi(&ctx, EK79007AD3_GAMMA5, 0x58);
+ mipi_dsi_dcs_write_seq_multi(&ctx, EK79007AD3_GAMMA6, 0xD2);
+ mipi_dsi_dcs_write_seq_multi(&ctx, EK79007AD3_GAMMA7, 0x88);
+ mipi_dsi_msleep(&ctx, 50);
+
+ /* 4 Lanes */
+ mipi_dsi_generic_write_multi(&ctx, (u8[]){ EK79007AD3_PANEL_CTRL3, 0x70 }, 2);
+
+ mipi_dsi_dcs_exit_sleep_mode_multi(&ctx);
+ mipi_dsi_msleep(&ctx, 120);
+
+ mipi_dsi_dcs_set_display_on_multi(&ctx);
+ mipi_dsi_msleep(&ctx, 120);
+
+ return ctx.accum_err;
+}
+
+static int m9189_disable(struct drm_panel *panel)
+{
+ struct m9189_panel *m9189 = to_m9189_panel(panel);
+ struct mipi_dsi_multi_context ctx = { .dsi = m9189->dsi };
+
+ ctx.dsi->mode_flags &= ~MIPI_DSI_MODE_LPM;
+
+ mipi_dsi_dcs_enter_sleep_mode_multi(&ctx);
+ mipi_dsi_msleep(&ctx, 120);
+
+ gpiod_set_value_cansleep(m9189->standby_gpio, 1);
+
+ return ctx.accum_err;
+}
+
+static int m9189_prepare(struct drm_panel *panel)
+{
+ struct m9189_panel *m9189 = to_m9189_panel(panel);
+ struct device *dev = &m9189->dsi->dev;
+ int ret;
+
+ ret = regulator_enable(m9189->supply);
+ if (ret < 0) {
+ dev_err(dev, "Failed to enable regulators: %d\n", ret);
+ return ret;
+ }
+
+ gpiod_set_value_cansleep(m9189->standby_gpio, 0);
+ msleep(20);
+ m9189_reset(m9189);
+
+ ret = m9189_on(m9189);
+ if (ret < 0) {
+ dev_err(dev, "Failed to initialize panel: %d\n", ret);
+ gpiod_set_value_cansleep(m9189->reset_gpio, 1);
+ regulator_disable(m9189->supply);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int m9189_unprepare(struct drm_panel *panel)
+{
+ struct m9189_panel *m9189 = to_m9189_panel(panel);
+
+ gpiod_set_value_cansleep(m9189->standby_gpio, 1);
+ msleep(50);
+
+ gpiod_set_value_cansleep(m9189->reset_gpio, 1);
+ regulator_disable(m9189->supply);
+
+ return 0;
+}
+
+static const struct drm_display_mode m9189_mode = {
+ .clock = (1024 + 160 + 160 + 10) * (600 + 12 + 23 + 1) * 60 / 1000,
+ .hdisplay = 1024,
+ .hsync_start = 1024 + 160,
+ .hsync_end = 1024 + 160 + 160,
+ .htotal = 1024 + 160 + 160 + 10,
+ .vdisplay = 600,
+ .vsync_start = 600 + 12,
+ .vsync_end = 600 + 12 + 23,
+ .vtotal = 600 + 12 + 23 + 1,
+ .width_mm = 154,
+ .height_mm = 86,
+};
+
+static int m9189_get_modes(struct drm_panel *panel,
+ struct drm_connector *connector)
+{
+ return drm_connector_helper_get_modes_fixed(connector, &m9189_mode);
+}
+
+static const struct drm_panel_funcs m9189_panel_funcs = {
+ .prepare = m9189_prepare,
+ .unprepare = m9189_unprepare,
+ .disable = m9189_disable,
+ .get_modes = m9189_get_modes,
+};
+
+static int lxd_m9189_probe(struct mipi_dsi_device *dsi)
+{
+ struct device *dev = &dsi->dev;
+ struct m9189_panel *m9189;
+ int ret;
+
+ m9189 = devm_kzalloc(dev, sizeof(*m9189), GFP_KERNEL);
+ if (!m9189)
+ return -ENOMEM;
+
+ m9189->supply = devm_regulator_get(dev, "power");
+ if (IS_ERR(m9189->supply))
+ return dev_err_probe(dev, PTR_ERR(m9189->supply),
+ "Failed to get power-supply\n");
+
+ m9189->reset_gpio = devm_gpiod_get(dev, "reset", GPIOD_OUT_LOW);
+ if (IS_ERR(m9189->reset_gpio))
+ return dev_err_probe(dev, PTR_ERR(m9189->reset_gpio),
+ "Failed to get reset-gpios\n");
+
+ m9189->standby_gpio = devm_gpiod_get(dev, "standby", GPIOD_OUT_LOW);
+ if (IS_ERR(m9189->standby_gpio))
+ return dev_err_probe(dev, PTR_ERR(m9189->standby_gpio),
+ "Failed to get standby-gpios\n");
+
+ m9189->dsi = dsi;
+ mipi_dsi_set_drvdata(dsi, m9189);
+
+ dsi->lanes = 4;
+ dsi->format = MIPI_DSI_FMT_RGB888;
+ dsi->mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_BURST;
+
+ drm_panel_init(&m9189->panel, dev, &m9189_panel_funcs,
+ DRM_MODE_CONNECTOR_DSI);
+ m9189->panel.prepare_prev_first = true;
+
+ ret = drm_panel_of_backlight(&m9189->panel);
+ if (ret)
+ return dev_err_probe(dev, ret, "Failed to get backlight\n");
+
+ drm_panel_add(&m9189->panel);
+
+ ret = mipi_dsi_attach(dsi);
+ if (ret < 0) {
+ dev_err_probe(dev, ret, "Failed to attach to DSI host\n");
+ drm_panel_remove(&m9189->panel);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void lxd_m9189_remove(struct mipi_dsi_device *dsi)
+{
+ struct m9189_panel *m9189 = mipi_dsi_get_drvdata(dsi);
+ int ret;
+
+ ret = mipi_dsi_detach(dsi);
+ if (ret < 0)
+ dev_err(&dsi->dev, "Failed to detach from DSI host: %d\n", ret);
+
+ drm_panel_remove(&m9189->panel);
+}
+
+static const struct of_device_id lxd_m9189_of_match[] = {
+ { .compatible = "lxd,m9189a" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, lxd_m9189_of_match);
+
+static struct mipi_dsi_driver lxd_m9189_driver = {
+ .probe = lxd_m9189_probe,
+ .remove = lxd_m9189_remove,
+ .driver = {
+ .name = "panel-lxd-m9189a",
+ .of_match_table = lxd_m9189_of_match,
+ },
+};
+module_mipi_dsi_driver(lxd_m9189_driver);
+
+MODULE_DESCRIPTION("DRM driver for LXD M9189A MIPI-DSI panels");
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/panel/panel-mantix-mlaf057we51.c b/drivers/gpu/drm/panel/panel-mantix-mlaf057we51.c
index 13352cb4ad77..27e188bb2d7f 100644
--- a/drivers/gpu/drm/panel/panel-mantix-mlaf057we51.c
+++ b/drivers/gpu/drm/panel/panel-mantix-mlaf057we51.c
@@ -168,7 +168,7 @@ static const struct drm_display_mode default_mode_mantix = {
.vsync_start = 1440 + 130,
.vsync_end = 1440 + 130 + 8,
.vtotal = 1440 + 130 + 8 + 106,
- .clock = 85298,
+ .clock = 81237,
.flags = DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC,
.width_mm = 65,
.height_mm = 130,
@@ -183,7 +183,7 @@ static const struct drm_display_mode default_mode_ys = {
.vsync_start = 1440 + 175,
.vsync_end = 1440 + 175 + 8,
.vtotal = 1440 + 175 + 8 + 50,
- .clock = 85298,
+ .clock = 80706,
.flags = DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC,
.width_mm = 65,
.height_mm = 130,
@@ -295,20 +295,10 @@ static int mantix_probe(struct mipi_dsi_device *dsi)
return 0;
}
-static void mantix_shutdown(struct mipi_dsi_device *dsi)
-{
- struct mantix *ctx = mipi_dsi_get_drvdata(dsi);
-
- drm_panel_unprepare(&ctx->panel);
- drm_panel_disable(&ctx->panel);
-}
-
static void mantix_remove(struct mipi_dsi_device *dsi)
{
struct mantix *ctx = mipi_dsi_get_drvdata(dsi);
- mantix_shutdown(dsi);
-
mipi_dsi_detach(dsi);
drm_panel_remove(&ctx->panel);
}
@@ -323,7 +313,6 @@ MODULE_DEVICE_TABLE(of, mantix_of_match);
static struct mipi_dsi_driver mantix_driver = {
.probe = mantix_probe,
.remove = mantix_remove,
- .shutdown = mantix_shutdown,
.driver = {
.name = DRV_NAME,
.of_match_table = mantix_of_match,
diff --git a/drivers/gpu/drm/panel/panel-motorola-mot.c b/drivers/gpu/drm/panel/panel-motorola-mot.c
new file mode 100644
index 000000000000..eb1f86c3d704
--- /dev/null
+++ b/drivers/gpu/drm/panel/panel-motorola-mot.c
@@ -0,0 +1,244 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/array_size.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/gpio/consumer.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/property.h>
+#include <linux/regulator/consumer.h>
+
+#include <video/mipi_display.h>
+
+#include <drm/drm_mipi_dsi.h>
+#include <drm/drm_modes.h>
+#include <drm/drm_panel.h>
+#include <drm/drm_probe_helper.h>
+
+static const struct regulator_bulk_data mot_panel_supplies[] = {
+ { .supply = "vddio" }, { .supply = "vdd" },
+};
+
+struct mot_panel {
+ struct drm_panel panel;
+ struct mipi_dsi_device *dsi;
+
+ struct gpio_desc *reset_gpio;
+
+ struct regulator_bulk_data *supplies;
+};
+
+static inline struct mot_panel *to_mot_panel(struct drm_panel *panel)
+{
+ return container_of(panel, struct mot_panel, panel);
+}
+
+static void mot_panel_reset(struct mot_panel *priv)
+{
+ gpiod_set_value_cansleep(priv->reset_gpio, 1);
+ usleep_range(50000, 51000);
+ gpiod_set_value_cansleep(priv->reset_gpio, 0);
+ usleep_range(10000, 11000);
+}
+
+static void mot_es2(struct mipi_dsi_multi_context *ctx)
+{
+ mipi_dsi_generic_write_seq_multi(ctx, 0x55, 0x01);
+
+ mipi_dsi_dcs_exit_sleep_mode_multi(ctx);
+ mipi_dsi_msleep(ctx, 120);
+
+ mipi_dsi_generic_write_seq_multi(ctx, 0xf4, 0x00, 0xbb, 0x46, 0x53, 0x0c, 0x49,
+ 0x74, 0x29, 0x12, 0x15, 0x2f, 0x2f, 0x04);
+ mipi_dsi_generic_write_seq_multi(ctx, 0xf8, 0x4b, 0x04, 0x10, 0x1a, 0x2c, 0x2c,
+ 0x2c, 0x2c, 0x14, 0x12);
+
+ mipi_dsi_generic_write_seq_multi(ctx, 0xb5, 0x03, 0x7f, 0x00, 0x80, 0xc7, 0x00);
+ mipi_dsi_generic_write_seq_multi(ctx, 0xb7, 0x66, 0xf6, 0x46, 0x9f, 0x90, 0x99,
+ 0xff, 0x80, 0x6d, 0x01);
+
+ /* Gamma R */
+ mipi_dsi_generic_write_seq_multi(ctx, 0xf9, 0x04);
+ mipi_dsi_generic_write_seq_multi(ctx, 0xfa, 0x00, 0x2f, 0x30, 0x12, 0x0e, 0x0c,
+ 0x22, 0x27, 0x31, 0x2e, 0x07, 0x0f);
+ mipi_dsi_generic_write_seq_multi(ctx, 0xfb, 0x00, 0x2f, 0x30, 0x12, 0x0e, 0x0c,
+ 0x22, 0x27, 0x31, 0x2e, 0x07, 0x0f);
+
+ /* Gamma G */
+ mipi_dsi_generic_write_seq_multi(ctx, 0xf9, 0x02);
+ mipi_dsi_generic_write_seq_multi(ctx, 0xfa, 0x00, 0x2f, 0x37, 0x15, 0x15, 0x11,
+ 0x1f, 0x25, 0x2d, 0x2a, 0x05, 0x0f);
+ mipi_dsi_generic_write_seq_multi(ctx, 0xfb, 0x00, 0x2f, 0x37, 0x15, 0x15, 0x11,
+ 0x1f, 0x25, 0x2d, 0x2a, 0x05, 0x0f);
+
+ /* Gamma B */
+ mipi_dsi_generic_write_seq_multi(ctx, 0xf9, 0x01);
+ mipi_dsi_generic_write_seq_multi(ctx, 0xfa, 0x00, 0x2f, 0x3f, 0x16, 0x1f, 0x15,
+ 0x1f, 0x25, 0x2d, 0x2b, 0x06, 0x0b);
+ mipi_dsi_generic_write_seq_multi(ctx, 0xfb, 0x00, 0x2f, 0x3f, 0x16, 0x1f, 0x15,
+ 0x1f, 0x25, 0x2d, 0x2b, 0x06, 0x0b);
+
+ /* Gamma W */
+ mipi_dsi_generic_write_seq_multi(ctx, 0xf9, 0x20);
+ mipi_dsi_generic_write_seq_multi(ctx, 0xfa, 0x00, 0x2f, 0x34, 0x15, 0x1a, 0x11,
+ 0x1f, 0x23, 0x2d, 0x29, 0x02, 0x08);
+ mipi_dsi_generic_write_seq_multi(ctx, 0xfb, 0x00, 0x2f, 0x34, 0x15, 0x1a, 0x11,
+ 0x1f, 0x23, 0x2d, 0x29, 0x02, 0x08);
+
+ mipi_dsi_generic_write_seq_multi(ctx, 0x53, 0x2c);
+ mipi_dsi_generic_write_seq_multi(ctx, 0x35, 0x00);
+}
+
+static int mot_panel_prepare(struct drm_panel *panel)
+{
+ struct mot_panel *priv = to_mot_panel(panel);
+ struct mipi_dsi_multi_context ctx = { .dsi = priv->dsi };
+ struct device *dev = panel->dev;
+ int ret;
+
+ ret = regulator_bulk_enable(ARRAY_SIZE(mot_panel_supplies), priv->supplies);
+ if (ret < 0) {
+ dev_err(dev, "failed to enable power supplies: %d\n", ret);
+ return ret;
+ }
+
+ mot_panel_reset(priv);
+
+ mipi_dsi_generic_write_seq_multi(&ctx, 0xf0, 0x5a, 0x5a);
+ mipi_dsi_generic_write_seq_multi(&ctx, 0xf1, 0x5a, 0x5a);
+ mipi_dsi_generic_write_seq_multi(&ctx, 0xd0, 0x8e);
+
+ mot_es2(&ctx);
+
+ mipi_dsi_dcs_set_display_on_multi(&ctx);
+ mipi_dsi_msleep(&ctx, 20);
+
+ return ctx.accum_err;
+}
+
+static int mot_panel_disable(struct drm_panel *panel)
+{
+ struct mot_panel *priv = to_mot_panel(panel);
+ struct mipi_dsi_multi_context ctx = { .dsi = priv->dsi };
+
+ mipi_dsi_dcs_set_display_off_multi(&ctx);
+ mipi_dsi_dcs_enter_sleep_mode_multi(&ctx);
+ mipi_dsi_msleep(&ctx, 70);
+
+ return ctx.accum_err;
+}
+
+static int mot_panel_unprepare(struct drm_panel *panel)
+{
+ struct mot_panel *priv = to_mot_panel(panel);
+
+ usleep_range(10000, 11000);
+
+ gpiod_set_value_cansleep(priv->reset_gpio, 1);
+ usleep_range(5000, 6000);
+
+ regulator_bulk_disable(ARRAY_SIZE(mot_panel_supplies), priv->supplies);
+
+ return 0;
+}
+
+static const struct drm_display_mode mot_panel_mode = {
+ .clock = (540 + 32 + 32 + 16) * (960 + 12 + 12 + 8) * 60 / 1000,
+ .hdisplay = 540,
+ .hsync_start = 540 + 32,
+ .hsync_end = 540 + 32 + 32,
+ .htotal = 540 + 32 + 32 + 16,
+ .vdisplay = 960,
+ .vsync_start = 960 + 12,
+ .vsync_end = 960 + 12 + 12,
+ .vtotal = 960 + 12 + 12 + 8,
+ .width_mm = 51,
+ .height_mm = 91,
+ .type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED,
+};
+
+static int mot_panel_get_modes(struct drm_panel *panel,
+ struct drm_connector *connector)
+{
+ return drm_connector_helper_get_modes_fixed(connector, &mot_panel_mode);
+}
+
+static const struct drm_panel_funcs mot_panel_panel_funcs = {
+ .prepare = mot_panel_prepare,
+ .disable = mot_panel_disable,
+ .unprepare = mot_panel_unprepare,
+ .get_modes = mot_panel_get_modes,
+};
+
+static int mot_panel_probe(struct mipi_dsi_device *dsi)
+{
+ struct device *dev = &dsi->dev;
+ struct mot_panel *priv;
+ int ret;
+
+ priv = devm_drm_panel_alloc(dev, struct mot_panel, panel,
+ &mot_panel_panel_funcs,
+ DRM_MODE_CONNECTOR_DSI);
+ if (IS_ERR(priv))
+ return PTR_ERR(priv);
+
+ ret = devm_regulator_bulk_get_const(dev, ARRAY_SIZE(mot_panel_supplies),
+ mot_panel_supplies, &priv->supplies);
+ if (ret < 0)
+ return dev_err_probe(dev, ret, "failed to get supplies\n");
+
+ priv->reset_gpio = devm_gpiod_get_optional(dev, "reset",
+ GPIOD_OUT_HIGH);
+ if (IS_ERR(priv->reset_gpio))
+ return dev_err_probe(dev, PTR_ERR(priv->reset_gpio),
+ "failed to get reset gpios\n");
+
+ priv->dsi = dsi;
+ mipi_dsi_set_drvdata(dsi, priv);
+
+ dsi->lanes = 2;
+ dsi->format = MIPI_DSI_FMT_RGB888;
+ dsi->mode_flags = MIPI_DSI_MODE_LPM;
+
+ ret = drm_panel_of_backlight(&priv->panel);
+ if (ret < 0)
+ return dev_err_probe(dev, ret, "failed to get backlight\n");
+
+ drm_panel_add(&priv->panel);
+
+ ret = devm_mipi_dsi_attach(dev, dsi);
+ if (ret < 0) {
+ drm_panel_remove(&priv->panel);
+ return dev_err_probe(dev, ret, "failed to attach to DSI host\n");
+ }
+
+ return 0;
+}
+
+static void mot_panel_remove(struct mipi_dsi_device *dsi)
+{
+ struct mot_panel *priv = mipi_dsi_get_drvdata(dsi);
+
+ drm_panel_remove(&priv->panel);
+}
+
+static const struct of_device_id mot_panel_of_match[] = {
+ { .compatible = "motorola,mot-panel" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, mot_panel_of_match);
+
+static struct mipi_dsi_driver mot_panel_driver = {
+ .driver = {
+ .name = "panel-motorola-mot",
+ .of_match_table = mot_panel_of_match,
+ },
+ .probe = mot_panel_probe,
+ .remove = mot_panel_remove,
+};
+module_mipi_dsi_driver(mot_panel_driver);
+
+MODULE_AUTHOR("Svyatoslav Ryhel <clamor95@gmail.com>");
+MODULE_DESCRIPTION("Motorola MOT panel driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/panel/panel-novatek-nt37700f.c b/drivers/gpu/drm/panel/panel-novatek-nt37700f.c
new file mode 100644
index 000000000000..74f46a268c0f
--- /dev/null
+++ b/drivers/gpu/drm/panel/panel-novatek-nt37700f.c
@@ -0,0 +1,299 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2024, The Linux Foundation. All rights reserved.
+ * Generated with linux-mdss-dsi-panel-driver-generator from vendor device tree:
+ * Copyright (c) 2013, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/backlight.h>
+#include <linux/delay.h>
+#include <linux/gpio/consumer.h>
+#include <linux/module.h>
+#include <linux/regulator/consumer.h>
+#include <linux/of.h>
+
+#include <video/mipi_display.h>
+
+#include <drm/drm_mipi_dsi.h>
+#include <drm/drm_modes.h>
+#include <drm/drm_panel.h>
+#include <drm/drm_probe_helper.h>
+
+struct nt37700f_tianma {
+ struct drm_panel panel;
+ struct mipi_dsi_device *dsi;
+ struct regulator *supply;
+ struct gpio_desc *reset_gpio;
+};
+
+static inline
+struct nt37700f_tianma *to_nt37700f_tianma(struct drm_panel *panel)
+{
+ return container_of(panel, struct nt37700f_tianma, panel);
+}
+
+static void nt37700f_tianma_reset(struct nt37700f_tianma *ctx)
+{
+ gpiod_set_value_cansleep(ctx->reset_gpio, 1);
+ usleep_range(1000, 2000);
+ gpiod_set_value_cansleep(ctx->reset_gpio, 0);
+ usleep_range(10000, 11000);
+}
+
+#define nt37700f_tianma_panel_switch_page(ctx, page) \
+ mipi_dsi_dcs_write_seq_multi((ctx), 0xf0, 0x55, 0xaa, 0x52, 0x08, (page))
+
+static int nt37700f_tianma_on(struct nt37700f_tianma *ctx)
+{
+ struct mipi_dsi_device *dsi = ctx->dsi;
+ struct mipi_dsi_multi_context dsi_ctx = { .dsi = dsi };
+
+ nt37700f_tianma_panel_switch_page(&dsi_ctx, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xc0, 0x56);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xca, 0x52);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x6f, 0x06);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xb5, 0x2b, 0x1a);
+ nt37700f_tianma_panel_switch_page(&dsi_ctx, 0x01);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xcd, 0x04, 0x82);
+ nt37700f_tianma_panel_switch_page(&dsi_ctx, 0x02);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xcc, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xff, 0xaa, 0x55, 0xa5, 0x80);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x6f, 0x55);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xf6, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x6f, 0x56);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xf6, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xff, 0xaa, 0x55, 0xa5, 0x81);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x6f, 0x07);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xf3, 0x07);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x6f, 0x05);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xf3, 0x25);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x90, 0x01);
+
+ mipi_dsi_dcs_set_column_address_multi(&dsi_ctx, 0x0000, 1080 - 1);
+ mipi_dsi_dcs_set_page_address_multi(&dsi_ctx, 0x0000, 2160 - 1);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, MIPI_DCS_WRITE_CONTROL_DISPLAY, 0x20);
+ mipi_dsi_dcs_set_tear_on_multi(&dsi_ctx, MIPI_DSI_DCS_TEAR_MODE_VBLANK);
+
+ nt37700f_tianma_panel_switch_page(&dsi_ctx, 0x00);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xc0, 0x56);
+ nt37700f_tianma_panel_switch_page(&dsi_ctx, 0x02);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xcd, 0x00);
+ nt37700f_tianma_panel_switch_page(&dsi_ctx, 0x04);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xd0, 0x11, 0x64);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0x6f, 0x09);
+ mipi_dsi_dcs_write_seq_multi(&dsi_ctx, 0xb1, 0x20);
+
+ mipi_dsi_dcs_exit_sleep_mode_multi(&dsi_ctx);
+ mipi_dsi_msleep(&dsi_ctx, 120);
+ mipi_dsi_dcs_set_display_on_multi(&dsi_ctx);
+
+ return dsi_ctx.accum_err;
+}
+
+static int nt37700f_tianma_disable(struct drm_panel *panel)
+{
+ struct nt37700f_tianma *ctx = to_nt37700f_tianma(panel);
+ struct mipi_dsi_device *dsi = ctx->dsi;
+ struct mipi_dsi_multi_context dsi_ctx = { .dsi = dsi };
+
+ mipi_dsi_dcs_set_display_off_multi(&dsi_ctx);
+ mipi_dsi_msleep(&dsi_ctx, 50);
+ mipi_dsi_dcs_enter_sleep_mode_multi(&dsi_ctx);
+ mipi_dsi_msleep(&dsi_ctx, 100);
+
+ return dsi_ctx.accum_err;
+}
+
+static int nt37700f_tianma_prepare(struct drm_panel *panel)
+{
+ struct nt37700f_tianma *ctx = to_nt37700f_tianma(panel);
+ struct device *dev = &ctx->dsi->dev;
+ int ret;
+
+ ret = regulator_enable(ctx->supply);
+ if (ret < 0) {
+ dev_err(dev, "Failed to enable power supply: %d\n", ret);
+ return ret;
+ }
+
+ nt37700f_tianma_reset(ctx);
+
+ ret = nt37700f_tianma_on(ctx);
+ if (ret < 0) {
+ dev_err(dev, "Failed to initialize panel: %d\n", ret);
+ gpiod_set_value_cansleep(ctx->reset_gpio, 1);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int nt37700f_tianma_unprepare(struct drm_panel *panel)
+{
+ struct nt37700f_tianma *ctx = to_nt37700f_tianma(panel);
+
+ gpiod_set_value_cansleep(ctx->reset_gpio, 1);
+ regulator_disable(ctx->supply);
+
+ return 0;
+}
+
+static const struct drm_display_mode nt37700f_tianma_mode = {
+ .clock = (1080 + 32 + 32 + 98) * (2160 + 32 + 4 + 98) * 60 / 1000,
+ .hdisplay = 1080,
+ .hsync_start = 1080 + 32,
+ .hsync_end = 1080 + 32 + 32,
+ .htotal = 1080 + 32 + 32 + 98,
+ .vdisplay = 2160,
+ .vsync_start = 2160 + 32,
+ .vsync_end = 2160 + 32 + 4,
+ .vtotal = 2160 + 32 + 4 + 98,
+ .width_mm = 69,
+ .height_mm = 137,
+ .type = DRM_MODE_TYPE_DRIVER,
+};
+
+static int nt37700f_tianma_get_modes(struct drm_panel *panel,
+ struct drm_connector *connector)
+{
+ return drm_connector_helper_get_modes_fixed(connector, &nt37700f_tianma_mode);
+}
+
+static const struct drm_panel_funcs nt37700f_tianma_panel_funcs = {
+ .prepare = nt37700f_tianma_prepare,
+ .unprepare = nt37700f_tianma_unprepare,
+ .disable = nt37700f_tianma_disable,
+ .get_modes = nt37700f_tianma_get_modes,
+};
+
+static int nt37700f_tianma_bl_update_status(struct backlight_device *bl)
+{
+ struct mipi_dsi_device *dsi = bl_get_data(bl);
+ u16 brightness = backlight_get_brightness(bl);
+ int ret;
+
+ dsi->mode_flags &= ~MIPI_DSI_MODE_LPM;
+
+ ret = mipi_dsi_dcs_set_display_brightness_large(dsi, brightness);
+ if (ret < 0)
+ return ret;
+
+ dsi->mode_flags |= MIPI_DSI_MODE_LPM;
+
+ return 0;
+}
+
+static int nt37700f_tianma_bl_get_brightness(struct backlight_device *bl)
+{
+ struct mipi_dsi_device *dsi = bl_get_data(bl);
+ u16 brightness;
+ int ret;
+
+ dsi->mode_flags &= ~MIPI_DSI_MODE_LPM;
+
+ ret = mipi_dsi_dcs_get_display_brightness_large(dsi, &brightness);
+ if (ret < 0)
+ return ret;
+
+ dsi->mode_flags |= MIPI_DSI_MODE_LPM;
+
+ return brightness;
+}
+
+static const struct backlight_ops nt37700f_tianma_bl_ops = {
+ .update_status = nt37700f_tianma_bl_update_status,
+ .get_brightness = nt37700f_tianma_bl_get_brightness,
+};
+
+static struct backlight_device *
+nt37700f_tianma_create_backlight(struct mipi_dsi_device *dsi)
+{
+ struct device *dev = &dsi->dev;
+ const struct backlight_properties props = {
+ .type = BACKLIGHT_RAW,
+ .brightness = 2047,
+ .max_brightness = 2047,
+ };
+
+ return devm_backlight_device_register(dev, dev_name(dev), dev, dsi,
+ &nt37700f_tianma_bl_ops, &props);
+}
+
+static int nt37700f_tianma_probe(struct mipi_dsi_device *dsi)
+{
+ struct device *dev = &dsi->dev;
+ struct nt37700f_tianma *ctx;
+ int ret;
+
+ ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
+ if (!ctx)
+ return -ENOMEM;
+
+ ctx->supply = devm_regulator_get(dev, "power");
+ if (IS_ERR(ctx->supply))
+ return dev_err_probe(dev, PTR_ERR(ctx->supply),
+ "Failed to get power-supply\n");
+
+ ctx->reset_gpio = devm_gpiod_get(dev, "reset", GPIOD_OUT_HIGH);
+ if (IS_ERR(ctx->reset_gpio))
+ return dev_err_probe(dev, PTR_ERR(ctx->reset_gpio),
+ "Failed to get reset-gpios\n");
+
+ ctx->dsi = dsi;
+ mipi_dsi_set_drvdata(dsi, ctx);
+
+ dsi->lanes = 4;
+ dsi->format = MIPI_DSI_FMT_RGB888;
+ dsi->mode_flags = MIPI_DSI_MODE_VIDEO_BURST |
+ MIPI_DSI_CLOCK_NON_CONTINUOUS | MIPI_DSI_MODE_LPM;
+
+ drm_panel_init(&ctx->panel, dev, &nt37700f_tianma_panel_funcs,
+ DRM_MODE_CONNECTOR_DSI);
+ ctx->panel.prepare_prev_first = true;
+
+ ctx->panel.backlight = nt37700f_tianma_create_backlight(dsi);
+ if (IS_ERR(ctx->panel.backlight))
+ return dev_err_probe(dev, PTR_ERR(ctx->panel.backlight),
+ "Failed to create backlight\n");
+
+ drm_panel_add(&ctx->panel);
+
+ ret = mipi_dsi_attach(dsi);
+ if (ret < 0) {
+ drm_panel_remove(&ctx->panel);
+ return dev_err_probe(dev, ret, "Failed to attach to DSI host\n");
+ }
+
+ return 0;
+}
+
+static void nt37700f_tianma_remove(struct mipi_dsi_device *dsi)
+{
+ struct nt37700f_tianma *ctx = mipi_dsi_get_drvdata(dsi);
+ int ret;
+
+ ret = mipi_dsi_detach(dsi);
+ if (ret < 0)
+ dev_err(&dsi->dev, "Failed to detach from DSI host: %d\n", ret);
+
+ drm_panel_remove(&ctx->panel);
+}
+
+static const struct of_device_id nt37700f_tianma_of_match[] = {
+ { .compatible = "novatek,nt37700f" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, nt37700f_tianma_of_match);
+
+static struct mipi_dsi_driver nt37700f_tianma_driver = {
+ .probe = nt37700f_tianma_probe,
+ .remove = nt37700f_tianma_remove,
+ .driver = {
+ .name = "panel-novatek-nt37700f",
+ .of_match_table = nt37700f_tianma_of_match,
+ },
+};
+module_mipi_dsi_driver(nt37700f_tianma_driver);
+
+MODULE_DESCRIPTION("DRM driver for nt37700f cmd mode dsi tianma panel");
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/panel/panel-renesas-r61307.c b/drivers/gpu/drm/panel/panel-renesas-r61307.c
index 319415194839..d8185cc1b5d6 100644
--- a/drivers/gpu/drm/panel/panel-renesas-r61307.c
+++ b/drivers/gpu/drm/panel/panel-renesas-r61307.c
@@ -14,6 +14,7 @@
#include <drm/drm_mipi_dsi.h>
#include <drm/drm_modes.h>
#include <drm/drm_panel.h>
+#include <drm/drm_probe_helper.h>
#define R61307_MACP 0xb0 /* Manufacturer CMD Protect */
#define R61307_MACP_ON 0x03
@@ -34,8 +35,6 @@ struct renesas_r61307 {
struct gpio_desc *reset_gpio;
- bool prepared;
-
bool dig_cont_adj;
bool inversion;
u32 gamma;
@@ -91,9 +90,6 @@ static int renesas_r61307_prepare(struct drm_panel *panel)
struct device *dev = &priv->dsi->dev;
int ret;
- if (priv->prepared)
- return 0;
-
ret = regulator_enable(priv->vcc_supply);
if (ret) {
dev_err(dev, "failed to enable vcc power supply\n");
@@ -112,7 +108,6 @@ static int renesas_r61307_prepare(struct drm_panel *panel)
renesas_r61307_reset(priv);
- priv->prepared = true;
return 0;
}
@@ -155,7 +150,7 @@ static int renesas_r61307_enable(struct drm_panel *panel)
mipi_dsi_dcs_set_display_on_multi(&ctx);
mipi_dsi_msleep(&ctx, 50);
- return 0;
+ return ctx.accum_err;
}
static int renesas_r61307_disable(struct drm_panel *panel)
@@ -167,16 +162,13 @@ static int renesas_r61307_disable(struct drm_panel *panel)
mipi_dsi_msleep(&ctx, 100);
mipi_dsi_dcs_enter_sleep_mode_multi(&ctx);
- return 0;
+ return ctx.accum_err;
}
static int renesas_r61307_unprepare(struct drm_panel *panel)
{
struct renesas_r61307 *priv = to_renesas_r61307(panel);
- if (!priv->prepared)
- return 0;
-
usleep_range(10000, 11000);
gpiod_set_value_cansleep(priv->reset_gpio, 1);
@@ -186,7 +178,6 @@ static int renesas_r61307_unprepare(struct drm_panel *panel)
usleep_range(2000, 3000);
regulator_disable(priv->vcc_supply);
- priv->prepared = false;
return 0;
}
@@ -202,25 +193,13 @@ static const struct drm_display_mode renesas_r61307_mode = {
.vtotal = 1024 + 24 + 8 + 2,
.width_mm = 76,
.height_mm = 101,
+ .type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED,
};
static int renesas_r61307_get_modes(struct drm_panel *panel,
struct drm_connector *connector)
{
- struct drm_display_mode *mode;
-
- mode = drm_mode_duplicate(connector->dev, &renesas_r61307_mode);
- if (!mode)
- return -ENOMEM;
-
- drm_mode_set_name(mode);
-
- mode->type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED;
- connector->display_info.width_mm = mode->width_mm;
- connector->display_info.height_mm = mode->height_mm;
- drm_mode_probed_add(connector, mode);
-
- return 1;
+ return drm_connector_helper_get_modes_fixed(connector, &renesas_r61307_mode);
}
static const struct drm_panel_funcs renesas_r61307_panel_funcs = {
@@ -259,7 +238,7 @@ static int renesas_r61307_probe(struct mipi_dsi_device *dsi)
return dev_err_probe(dev, PTR_ERR(priv->reset_gpio),
"Failed to get reset gpios\n");
- if (device_property_read_bool(dev, "renesas,inversion"))
+ if (device_property_read_bool(dev, "renesas,column-inversion"))
priv->inversion = true;
if (device_property_read_bool(dev, "renesas,contrast"))
@@ -282,7 +261,7 @@ static int renesas_r61307_probe(struct mipi_dsi_device *dsi)
drm_panel_add(&priv->panel);
- ret = mipi_dsi_attach(dsi);
+ ret = devm_mipi_dsi_attach(dev, dsi);
if (ret) {
drm_panel_remove(&priv->panel);
return dev_err_probe(dev, ret, "Failed to attach to DSI host\n");
@@ -294,11 +273,6 @@ static int renesas_r61307_probe(struct mipi_dsi_device *dsi)
static void renesas_r61307_remove(struct mipi_dsi_device *dsi)
{
struct renesas_r61307 *priv = mipi_dsi_get_drvdata(dsi);
- int ret;
-
- ret = mipi_dsi_detach(dsi);
- if (ret)
- dev_err(&dsi->dev, "Failed to detach from DSI host: %d\n", ret);
drm_panel_remove(&priv->panel);
}
diff --git a/drivers/gpu/drm/panel/panel-renesas-r69328.c b/drivers/gpu/drm/panel/panel-renesas-r69328.c
index 46287ab04c30..bfe2787f8f53 100644
--- a/drivers/gpu/drm/panel/panel-renesas-r69328.c
+++ b/drivers/gpu/drm/panel/panel-renesas-r69328.c
@@ -14,6 +14,7 @@
#include <drm/drm_mipi_dsi.h>
#include <drm/drm_modes.h>
#include <drm/drm_panel.h>
+#include <drm/drm_probe_helper.h>
#define R69328_MACP 0xb0 /* Manufacturer Access CMD Protect */
#define R69328_MACP_ON 0x03
@@ -32,8 +33,6 @@ struct renesas_r69328 {
struct regulator *vdd_supply;
struct regulator *vddio_supply;
struct gpio_desc *reset_gpio;
-
- bool prepared;
};
static inline struct renesas_r69328 *to_renesas_r69328(struct drm_panel *panel)
@@ -55,9 +54,6 @@ static int renesas_r69328_prepare(struct drm_panel *panel)
struct device *dev = &priv->dsi->dev;
int ret;
- if (priv->prepared)
- return 0;
-
ret = regulator_enable(priv->vdd_supply);
if (ret) {
dev_err(dev, "failed to enable vdd power supply\n");
@@ -76,7 +72,6 @@ static int renesas_r69328_prepare(struct drm_panel *panel)
renesas_r69328_reset(priv);
- priv->prepared = true;
return 0;
}
@@ -122,7 +117,7 @@ static int renesas_r69328_enable(struct drm_panel *panel)
mipi_dsi_dcs_set_display_on_multi(&ctx);
mipi_dsi_msleep(&ctx, 50);
- return 0;
+ return ctx.accum_err;
}
static int renesas_r69328_disable(struct drm_panel *panel)
@@ -134,16 +129,13 @@ static int renesas_r69328_disable(struct drm_panel *panel)
mipi_dsi_msleep(&ctx, 60);
mipi_dsi_dcs_enter_sleep_mode_multi(&ctx);
- return 0;
+ return ctx.accum_err;
}
static int renesas_r69328_unprepare(struct drm_panel *panel)
{
struct renesas_r69328 *priv = to_renesas_r69328(panel);
- if (!priv->prepared)
- return 0;
-
gpiod_set_value_cansleep(priv->reset_gpio, 1);
usleep_range(5000, 6000);
@@ -151,7 +143,6 @@ static int renesas_r69328_unprepare(struct drm_panel *panel)
regulator_disable(priv->vddio_supply);
regulator_disable(priv->vdd_supply);
- priv->prepared = false;
return 0;
}
@@ -167,25 +158,13 @@ static const struct drm_display_mode renesas_r69328_mode = {
.vtotal = 1280 + 6 + 3 + 1,
.width_mm = 59,
.height_mm = 105,
+ .type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED,
};
static int renesas_r69328_get_modes(struct drm_panel *panel,
struct drm_connector *connector)
{
- struct drm_display_mode *mode;
-
- mode = drm_mode_duplicate(connector->dev, &renesas_r69328_mode);
- if (!mode)
- return -ENOMEM;
-
- drm_mode_set_name(mode);
-
- mode->type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED;
- connector->display_info.width_mm = mode->width_mm;
- connector->display_info.height_mm = mode->height_mm;
- drm_mode_probed_add(connector, mode);
-
- return 1;
+ return drm_connector_helper_get_modes_fixed(connector, &renesas_r69328_mode);
}
static const struct drm_panel_funcs renesas_r69328_panel_funcs = {
@@ -238,7 +217,7 @@ static int renesas_r69328_probe(struct mipi_dsi_device *dsi)
drm_panel_add(&priv->panel);
- ret = mipi_dsi_attach(dsi);
+ ret = devm_mipi_dsi_attach(dev, dsi);
if (ret) {
drm_panel_remove(&priv->panel);
return dev_err_probe(dev, ret, "Failed to attach to DSI host\n");
@@ -250,11 +229,6 @@ static int renesas_r69328_probe(struct mipi_dsi_device *dsi)
static void renesas_r69328_remove(struct mipi_dsi_device *dsi)
{
struct renesas_r69328 *priv = mipi_dsi_get_drvdata(dsi);
- int ret;
-
- ret = mipi_dsi_detach(dsi);
- if (ret)
- dev_err(&dsi->dev, "Failed to detach from DSI host: %d\n", ret);
drm_panel_remove(&priv->panel);
}
diff --git a/drivers/gpu/drm/panel/panel-simple.c b/drivers/gpu/drm/panel/panel-simple.c
index e5fc9b072404..e1d15434ea54 100644
--- a/drivers/gpu/drm/panel/panel-simple.c
+++ b/drivers/gpu/drm/panel/panel-simple.c
@@ -2096,6 +2096,35 @@ static const struct panel_desc edt_et057090dhu = {
.connector_type = DRM_MODE_CONNECTOR_DPI,
};
+static const struct display_timing edt_et057023udba_timing = {
+ .pixelclock = { 23200000, 24190000, 39640000 },
+ .hactive = { 640, 640, 640 },
+ .hfront_porch = { 20, 40, 200 },
+ .hback_porch = { 87, 40, 1 },
+ .hsync_len = { 1, 48, 87 },
+ .vactive = { 480, 480, 480 },
+ .vfront_porch = { 5, 13, 200 },
+ .vback_porch = { 31, 31, 29 },
+ .vsync_len = { 1, 1, 3 },
+ .flags = DISPLAY_FLAGS_VSYNC_LOW | DISPLAY_FLAGS_HSYNC_LOW |
+ DISPLAY_FLAGS_DE_HIGH | DISPLAY_FLAGS_PIXDATA_POSEDGE |
+ DISPLAY_FLAGS_SYNC_POSEDGE,
+};
+
+static const struct panel_desc edt_et057023udba = {
+ .timings = &edt_et057023udba_timing,
+ .num_timings = 1,
+ .bpc = 8,
+ .size = {
+ .width = 115,
+ .height = 86,
+ },
+ .bus_format = MEDIA_BUS_FMT_RGB888_1X24,
+ .bus_flags = DRM_BUS_FLAG_DE_HIGH | DRM_BUS_FLAG_PIXDATA_DRIVE_POSEDGE |
+ DRM_BUS_FLAG_SYNC_DRIVE_POSEDGE,
+ .connector_type = DRM_MODE_CONNECTOR_DPI,
+};
+
static const struct drm_display_mode edt_etm0700g0dh6_mode = {
.clock = 33260,
.hdisplay = 800,
@@ -4046,6 +4075,33 @@ static const struct panel_desc powertip_ph800480t013_idf02 = {
.connector_type = DRM_MODE_CONNECTOR_DPI,
};
+static const struct drm_display_mode powertip_ph800480t032_zhc19_mode = {
+ .clock = 27200,
+ .hdisplay = 800,
+ .hsync_start = 800 + 52,
+ .hsync_end = 800 + 52 + 2,
+ .htotal = 800 + 52 + 2 + 44,
+ .vdisplay = 480,
+ .vsync_start = 480 + 7,
+ .vsync_end = 480 + 7 + 2,
+ .vtotal = 480 + 7 + 2 + 2,
+};
+
+static const struct panel_desc powertip_ph800480t032_zhc19 = {
+ .modes = &powertip_ph800480t032_zhc19_mode,
+ .num_modes = 1,
+ .bpc = 8,
+ .size = {
+ .width = 152,
+ .height = 91,
+ },
+ .bus_format = MEDIA_BUS_FMT_RGB888_1X24,
+ .bus_flags = DRM_BUS_FLAG_DE_HIGH |
+ DRM_BUS_FLAG_PIXDATA_SAMPLE_NEGEDGE |
+ DRM_BUS_FLAG_SYNC_SAMPLE_NEGEDGE,
+ .connector_type = DRM_MODE_CONNECTOR_DPI,
+};
+
static const struct drm_display_mode primeview_pm070wl4_mode = {
.clock = 32000,
.hdisplay = 800,
@@ -4976,6 +5032,33 @@ static const struct panel_desc vl050_8048nt_c01 = {
.bus_flags = DRM_BUS_FLAG_DE_HIGH | DRM_BUS_FLAG_PIXDATA_SAMPLE_NEGEDGE,
};
+static const struct drm_display_mode waveshare_133inch_mode = {
+ .clock = 148500,
+ .hdisplay = 1920,
+ .hsync_start = 1920 + 88,
+ .hsync_end = 1920 + 88 + 44,
+ .htotal = 1920 + 88 + 44 + 148,
+ .vdisplay = 1080,
+ .vsync_start = 1080 + 4,
+ .vsync_end = 1080 + 4 + 5,
+ .vtotal = 1080 + 4 + 5 + 36,
+ .flags = DRM_MODE_FLAG_PVSYNC | DRM_MODE_FLAG_PHSYNC,
+};
+
+static const struct panel_desc waveshare_133inch = {
+ .modes = &waveshare_133inch_mode,
+ .num_modes = 1,
+ .bpc = 8,
+ .size = {
+ .width = 293,
+ .height = 163,
+ },
+ .bus_format = MEDIA_BUS_FMT_RGB888_1X24,
+ .connector_type = DRM_MODE_CONNECTOR_DPI,
+ .bus_flags = DRM_BUS_FLAG_PIXDATA_SAMPLE_POSEDGE |
+ DRM_BUS_FLAG_SYNC_SAMPLE_POSEDGE,
+};
+
static const struct drm_display_mode winstar_wf35ltiacd_mode = {
.clock = 6410,
.hdisplay = 320,
@@ -5226,6 +5309,9 @@ static const struct of_device_id platform_of_match[] = {
.compatible = "edt,etm0430g0dh6",
.data = &edt_etm0430g0dh6,
}, {
+ .compatible = "edt,et057023udba",
+ .data = &edt_et057023udba,
+ }, {
.compatible = "edt,et057090dhu",
.data = &edt_et057090dhu,
}, {
@@ -5457,6 +5543,9 @@ static const struct of_device_id platform_of_match[] = {
.compatible = "powertip,ph800480t013-idf02",
.data = &powertip_ph800480t013_idf02,
}, {
+ .compatible = "powertip,ph800480t032-zhc19",
+ .data = &powertip_ph800480t032_zhc19,
+ }, {
.compatible = "primeview,pm070wl4",
.data = &primeview_pm070wl4,
}, {
@@ -5574,6 +5663,9 @@ static const struct of_device_id platform_of_match[] = {
.compatible = "vxt,vl050-8048nt-c01",
.data = &vl050_8048nt_c01,
}, {
+ .compatible = "waveshare,13.3inch-panel",
+ .data = &waveshare_133inch,
+ }, {
.compatible = "winstar,wf35ltiacd",
.data = &winstar_wf35ltiacd,
}, {
diff --git a/drivers/gpu/drm/panthor/panthor_sched.c b/drivers/gpu/drm/panthor/panthor_sched.c
index a70f1db0764e..2fe04d0f0e3a 100644
--- a/drivers/gpu/drm/panthor/panthor_sched.c
+++ b/drivers/gpu/drm/panthor/panthor_sched.c
@@ -3916,7 +3916,7 @@ static void job_release(struct kref *ref)
if (job->base.s_fence)
drm_sched_job_cleanup(&job->base);
- if (job->done_fence && job->done_fence->ops)
+ if (dma_fence_was_initialized(job->done_fence))
dma_fence_put(job->done_fence);
else
dma_fence_free(job->done_fence);
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index 5d523d5dae88..705c012fcf9e 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -37,6 +37,7 @@
#include <drm/drm_client_event.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_device.h>
+#include <drm/drm_fb_helper.h>
#include <drm/drm_file.h>
#include <drm/drm_framebuffer.h>
#include <drm/drm_probe_helper.h>
@@ -1574,7 +1575,6 @@ int radeon_suspend_kms(struct drm_device *dev, bool suspend,
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
struct drm_framebuffer *fb = crtc->primary->fb;
- struct radeon_bo *robj;
if (radeon_crtc->cursor_bo) {
struct radeon_bo *robj = gem_to_radeon_bo(radeon_crtc->cursor_bo);
@@ -1588,9 +1588,10 @@ int radeon_suspend_kms(struct drm_device *dev, bool suspend,
if (fb == NULL || fb->obj[0] == NULL) {
continue;
}
- robj = gem_to_radeon_bo(fb->obj[0]);
/* don't unpin kernel fb objects */
- if (!radeon_fbdev_robj_is_fb(rdev, robj)) {
+ if (!drm_fb_helper_gem_is_fb(dev->fb_helper, fb->obj[0])) {
+ struct radeon_bo *robj = gem_to_radeon_bo(fb->obj[0]);
+
r = radeon_bo_reserve(robj, false);
if (r == 0) {
radeon_bo_unpin(robj);
diff --git a/drivers/gpu/drm/radeon/radeon_fbdev.c b/drivers/gpu/drm/radeon/radeon_fbdev.c
index 18d61f3f7344..3e243f5e2f44 100644
--- a/drivers/gpu/drm/radeon/radeon_fbdev.c
+++ b/drivers/gpu/drm/radeon/radeon_fbdev.c
@@ -274,20 +274,3 @@ err_radeon_fbdev_destroy_pinned_object:
radeon_fbdev_destroy_pinned_object(gobj);
return ret;
}
-
-bool radeon_fbdev_robj_is_fb(struct radeon_device *rdev, struct radeon_bo *robj)
-{
- struct drm_fb_helper *fb_helper = rdev_to_drm(rdev)->fb_helper;
- struct drm_gem_object *gobj;
-
- if (!fb_helper)
- return false;
-
- gobj = drm_gem_fb_get_obj(fb_helper->fb, 0);
- if (!gobj)
- return false;
- if (gobj != &robj->tbo.base)
- return false;
-
- return true;
-}
diff --git a/drivers/gpu/drm/radeon/radeon_mode.h b/drivers/gpu/drm/radeon/radeon_mode.h
index 088af85902f7..ae1ecdc2e189 100644
--- a/drivers/gpu/drm/radeon/radeon_mode.h
+++ b/drivers/gpu/drm/radeon/radeon_mode.h
@@ -936,14 +936,9 @@ int radeon_fbdev_driver_fbdev_probe(struct drm_fb_helper *fb_helper,
struct drm_fb_helper_surface_size *sizes);
#define RADEON_FBDEV_DRIVER_OPS \
.fbdev_probe = radeon_fbdev_driver_fbdev_probe
-bool radeon_fbdev_robj_is_fb(struct radeon_device *rdev, struct radeon_bo *robj);
#else
#define RADEON_FBDEV_DRIVER_OPS \
.fbdev_probe = NULL
-static inline bool radeon_fbdev_robj_is_fb(struct radeon_device *rdev, struct radeon_bo *robj)
-{
- return false;
-}
#endif
void radeon_crtc_handle_vblank(struct radeon_device *rdev, int crtc_id);
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index 0765d69423d2..34ce53b4bdb9 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -222,10 +222,8 @@ static void ttm_bo_flush_all_fences(struct ttm_buffer_object *bo)
struct dma_fence *fence;
dma_resv_iter_begin(&cursor, resv, DMA_RESV_USAGE_BOOKKEEP);
- dma_resv_for_each_fence_unlocked(&cursor, fence) {
- if (!fence->ops->signaled)
- dma_fence_enable_sw_signaling(fence);
- }
+ dma_resv_for_each_fence_unlocked(&cursor, fence)
+ dma_fence_enable_sw_signaling(fence);
dma_resv_iter_end(&cursor);
}
diff --git a/drivers/gpu/drm/vc4/vc4_hdmi.c b/drivers/gpu/drm/vc4/vc4_hdmi.c
index 9ce90a694c3c..9898e5451a07 100644
--- a/drivers/gpu/drm/vc4/vc4_hdmi.c
+++ b/drivers/gpu/drm/vc4/vc4_hdmi.c
@@ -2449,17 +2449,23 @@ static int vc4_hdmi_hotplug_init(struct vc4_hdmi *vc4_hdmi)
int ret;
if (vc4_hdmi->variant->external_irq_controller) {
- unsigned int hpd_con = platform_get_irq_byname(pdev, "hpd-connected");
- unsigned int hpd_rm = platform_get_irq_byname(pdev, "hpd-removed");
+ int hpd = platform_get_irq_byname(pdev, "hpd-connected");
- ret = devm_request_threaded_irq(&pdev->dev, hpd_con,
+ if (hpd < 0)
+ return hpd;
+
+ ret = devm_request_threaded_irq(&pdev->dev, hpd,
NULL,
vc4_hdmi_hpd_irq_thread, IRQF_ONESHOT,
"vc4 hdmi hpd connected", vc4_hdmi);
if (ret)
return ret;
- ret = devm_request_threaded_irq(&pdev->dev, hpd_rm,
+ hpd = platform_get_irq_byname(pdev, "hpd-removed");
+ if (hpd < 0)
+ return hpd;
+
+ ret = devm_request_threaded_irq(&pdev->dev, hpd,
NULL,
vc4_hdmi_hpd_irq_thread, IRQF_ONESHOT,
"vc4 hdmi hpd disconnected", vc4_hdmi);
diff --git a/drivers/gpu/drm/xe/Makefile b/drivers/gpu/drm/xe/Makefile
index 3a3f9f22d42a..dab979287a96 100644
--- a/drivers/gpu/drm/xe/Makefile
+++ b/drivers/gpu/drm/xe/Makefile
@@ -211,14 +211,15 @@ $(obj)/i915-display/%.o: $(srctree)/drivers/gpu/drm/i915/display/%.c FORCE
# Display code specific to xe
xe-$(CONFIG_DRM_XE_DISPLAY) += \
- display/intel_bo.o \
- display/intel_fb_bo.o \
display/intel_fbdev_fb.o \
display/xe_display.o \
+ display/xe_display_bo.o \
+ display/xe_display_pcode.o \
display/xe_display_rpm.o \
display/xe_display_wa.o \
display/xe_dsb_buffer.o \
display/xe_fb_pin.o \
+ display/xe_frontbuffer.o \
display/xe_hdcp_gsc.o \
display/xe_initial_plane.o \
display/xe_panic.o \
@@ -233,6 +234,7 @@ xe-$(CONFIG_DRM_XE_DISPLAY) += \
i915-display/intel_audio.o \
i915-display/intel_backlight.o \
i915-display/intel_bios.o \
+ i915-display/intel_bo.o \
i915-display/intel_bw.o \
i915-display/intel_casf.o \
i915-display/intel_cdclk.o \
@@ -275,7 +277,7 @@ xe-$(CONFIG_DRM_XE_DISPLAY) += \
i915-display/intel_dp_test.o \
i915-display/intel_dpll.o \
i915-display/intel_dpll_mgr.o \
- i915-display/intel_dpt_common.o \
+ i915-display/intel_dpt.o \
i915-display/intel_dram.o \
i915-display/intel_drrs.o \
i915-display/intel_dsb.o \
@@ -304,6 +306,7 @@ xe-$(CONFIG_DRM_XE_DISPLAY) += \
i915-display/intel_modeset_lock.o \
i915-display/intel_modeset_setup.o \
i915-display/intel_modeset_verify.o \
+ i915-display/intel_overlay.o \
i915-display/intel_panel.o \
i915-display/intel_parent.o \
i915-display/intel_pch.o \
diff --git a/drivers/gpu/drm/xe/compat-i915-headers/i915_reg.h b/drivers/gpu/drm/xe/compat-i915-headers/i915_reg.h
deleted file mode 100644
index 8619ec015ad4..000000000000
--- a/drivers/gpu/drm/xe/compat-i915-headers/i915_reg.h
+++ /dev/null
@@ -1,6 +0,0 @@
-/* SPDX-License-Identifier: MIT */
-/*
- * Copyright © 2023 Intel Corporation
- */
-
-#include "../../i915/i915_reg.h"
diff --git a/drivers/gpu/drm/xe/compat-i915-headers/i915_vma.h b/drivers/gpu/drm/xe/compat-i915-headers/i915_vma.h
deleted file mode 100644
index c4b5adaaa99a..000000000000
--- a/drivers/gpu/drm/xe/compat-i915-headers/i915_vma.h
+++ /dev/null
@@ -1,36 +0,0 @@
-/* SPDX-License-Identifier: MIT */
-/*
- * Copyright © 2023 Intel Corporation
- */
-
-#ifndef I915_VMA_H
-#define I915_VMA_H
-
-#include <uapi/drm/i915_drm.h>
-
-#include "xe_ggtt.h"
-
-#include <linux/refcount.h>
-
-/* We don't want these from i915_drm.h in case of Xe */
-#undef I915_TILING_X
-#undef I915_TILING_Y
-#define I915_TILING_X 0
-#define I915_TILING_Y 0
-
-struct xe_bo;
-
-struct i915_vma {
- refcount_t ref;
- struct xe_bo *bo, *dpt;
- struct xe_ggtt_node *node;
-};
-
-#define i915_vma_fence_id(vma) -1
-
-static inline u32 i915_ggtt_offset(const struct i915_vma *vma)
-{
- return xe_ggtt_node_addr(vma->node);
-}
-
-#endif
diff --git a/drivers/gpu/drm/xe/compat-i915-headers/intel_pcode.h b/drivers/gpu/drm/xe/compat-i915-headers/intel_pcode.h
deleted file mode 100644
index 4fcd3bf6b76f..000000000000
--- a/drivers/gpu/drm/xe/compat-i915-headers/intel_pcode.h
+++ /dev/null
@@ -1,11 +0,0 @@
-/* SPDX-License-Identifier: MIT */
-/*
- * Copyright © 2023 Intel Corporation
- */
-
-#ifndef __INTEL_PCODE_H__
-#define __INTEL_PCODE_H__
-
-#include "xe_pcode.h"
-
-#endif /* __INTEL_PCODE_H__ */
diff --git a/drivers/gpu/drm/xe/compat-i915-headers/intel_uncore.h b/drivers/gpu/drm/xe/compat-i915-headers/intel_uncore.h
index c05d4c4292d3..a8cfd65119e0 100644
--- a/drivers/gpu/drm/xe/compat-i915-headers/intel_uncore.h
+++ b/drivers/gpu/drm/xe/compat-i915-headers/intel_uncore.h
@@ -6,6 +6,7 @@
#ifndef __INTEL_UNCORE_H__
#define __INTEL_UNCORE_H__
+#include "i915_reg_defs.h"
#include "xe_device.h"
#include "xe_device_types.h"
#include "xe_mmio.h"
@@ -38,6 +39,14 @@ static inline u8 intel_uncore_read8(struct intel_uncore *uncore,
return xe_mmio_read8(__compat_uncore_to_mmio(uncore), reg);
}
+static inline void intel_uncore_write8(struct intel_uncore *uncore,
+ i915_reg_t i915_reg, u8 val)
+{
+ struct xe_reg reg = XE_REG(i915_mmio_reg_offset(i915_reg));
+
+ xe_mmio_write8(__compat_uncore_to_mmio(uncore), reg, val);
+}
+
static inline u16 intel_uncore_read16(struct intel_uncore *uncore,
i915_reg_t i915_reg)
{
diff --git a/drivers/gpu/drm/xe/display/intel_bo.c b/drivers/gpu/drm/xe/display/intel_bo.c
deleted file mode 100644
index 05d5e5c0a0de..000000000000
--- a/drivers/gpu/drm/xe/display/intel_bo.c
+++ /dev/null
@@ -1,109 +0,0 @@
-// SPDX-License-Identifier: MIT
-/* Copyright © 2024 Intel Corporation */
-
-#include <drm/drm_gem.h>
-
-#include "intel_bo.h"
-#include "intel_frontbuffer.h"
-#include "xe_bo.h"
-#include "xe_pxp.h"
-
-bool intel_bo_is_tiled(struct drm_gem_object *obj)
-{
- /* legacy tiling is unused */
- return false;
-}
-
-bool intel_bo_is_userptr(struct drm_gem_object *obj)
-{
- /* xe does not have userptr bos */
- return false;
-}
-
-bool intel_bo_is_shmem(struct drm_gem_object *obj)
-{
- return false;
-}
-
-bool intel_bo_is_protected(struct drm_gem_object *obj)
-{
- return xe_bo_is_protected(gem_to_xe_bo(obj));
-}
-
-int intel_bo_key_check(struct drm_gem_object *obj)
-{
- return xe_pxp_obj_key_check(obj);
-}
-
-int intel_bo_fb_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
-{
- return drm_gem_prime_mmap(obj, vma);
-}
-
-int intel_bo_read_from_page(struct drm_gem_object *obj, u64 offset, void *dst, int size)
-{
- struct xe_bo *bo = gem_to_xe_bo(obj);
-
- return xe_bo_read(bo, offset, dst, size);
-}
-
-struct xe_frontbuffer {
- struct intel_frontbuffer base;
- struct drm_gem_object *obj;
- struct kref ref;
-};
-
-struct intel_frontbuffer *intel_bo_frontbuffer_get(struct drm_gem_object *obj)
-{
- struct xe_frontbuffer *front;
-
- front = kmalloc_obj(*front);
- if (!front)
- return NULL;
-
- intel_frontbuffer_init(&front->base, obj->dev);
-
- kref_init(&front->ref);
-
- drm_gem_object_get(obj);
- front->obj = obj;
-
- return &front->base;
-}
-
-void intel_bo_frontbuffer_ref(struct intel_frontbuffer *_front)
-{
- struct xe_frontbuffer *front =
- container_of(_front, typeof(*front), base);
-
- kref_get(&front->ref);
-}
-
-static void frontbuffer_release(struct kref *ref)
-{
- struct xe_frontbuffer *front =
- container_of(ref, typeof(*front), ref);
-
- intel_frontbuffer_fini(&front->base);
-
- drm_gem_object_put(front->obj);
-
- kfree(front);
-}
-
-void intel_bo_frontbuffer_put(struct intel_frontbuffer *_front)
-{
- struct xe_frontbuffer *front =
- container_of(_front, typeof(*front), base);
-
- kref_put(&front->ref, frontbuffer_release);
-}
-
-void intel_bo_frontbuffer_flush_for_display(struct intel_frontbuffer *front)
-{
-}
-
-void intel_bo_describe(struct seq_file *m, struct drm_gem_object *obj)
-{
- /* FIXME */
-}
diff --git a/drivers/gpu/drm/xe/display/intel_fbdev_fb.c b/drivers/gpu/drm/xe/display/intel_fbdev_fb.c
index 7ad76022cb14..87af5646c938 100644
--- a/drivers/gpu/drm/xe/display/intel_fbdev_fb.c
+++ b/drivers/gpu/drm/xe/display/intel_fbdev_fb.c
@@ -23,6 +23,29 @@ u32 intel_fbdev_fb_pitch_align(u32 stride)
return ALIGN(stride, XE_PAGE_SIZE);
}
+bool intel_fbdev_fb_prefer_stolen(struct drm_device *drm, unsigned int size)
+{
+ struct xe_device *xe = to_xe_device(drm);
+ struct ttm_resource_manager *stolen;
+
+ stolen = ttm_manager_type(&xe->ttm, XE_PL_STOLEN);
+ if (!stolen)
+ return false;
+
+ if (IS_DGFX(xe))
+ return false;
+
+ if (XE_DEVICE_WA(xe, 22019338487_display))
+ return false;
+
+ /*
+ * If the FB is too big, just don't use it since fbdev is not very
+ * important and we should probably use that space with FBC or other
+ * features.
+ */
+ return stolen->size >= size * 2;
+}
+
struct drm_gem_object *intel_fbdev_fb_bo_create(struct drm_device *drm, int size)
{
struct xe_device *xe = to_xe_device(drm);
@@ -30,7 +53,7 @@ struct drm_gem_object *intel_fbdev_fb_bo_create(struct drm_device *drm, int size
obj = ERR_PTR(-ENODEV);
- if (!IS_DGFX(xe) && !XE_DEVICE_WA(xe, 22019338487_display)) {
+ if (intel_fbdev_fb_prefer_stolen(drm, size)) {
obj = xe_bo_create_pin_map_novm(xe, xe_device_get_root_tile(xe),
size,
ttm_bo_type_kernel, XE_BO_FLAG_SCANOUT |
@@ -40,6 +63,8 @@ struct drm_gem_object *intel_fbdev_fb_bo_create(struct drm_device *drm, int size
drm_info(&xe->drm, "Allocated fbdev into stolen\n");
else
drm_info(&xe->drm, "Allocated fbdev into stolen failed: %li\n", PTR_ERR(obj));
+ } else {
+ drm_info(&xe->drm, "Allocating fbdev: Stolen memory not preferred.\n");
}
if (IS_ERR(obj)) {
diff --git a/drivers/gpu/drm/xe/display/xe_display.c b/drivers/gpu/drm/xe/display/xe_display.c
index f8a831b5dc7d..49b6f98e7391 100644
--- a/drivers/gpu/drm/xe/display/xe_display.c
+++ b/drivers/gpu/drm/xe/display/xe_display.c
@@ -35,7 +35,11 @@
#include "intel_hotplug.h"
#include "intel_opregion.h"
#include "skl_watermark.h"
+#include "xe_display_bo.h"
+#include "xe_display_pcode.h"
#include "xe_display_rpm.h"
+#include "xe_dsb_buffer.h"
+#include "xe_frontbuffer.h"
#include "xe_hdcp_gsc.h"
#include "xe_initial_plane.h"
#include "xe_module.h"
@@ -538,10 +542,14 @@ static const struct intel_display_irq_interface xe_display_irq_interface = {
};
static const struct intel_display_parent_interface parent = {
+ .bo = &xe_display_bo_interface,
+ .dsb = &xe_display_dsb_interface,
+ .frontbuffer = &xe_display_frontbuffer_interface,
.hdcp = &xe_display_hdcp_interface,
.initial_plane = &xe_display_initial_plane_interface,
.irq = &xe_display_irq_interface,
.panic = &xe_display_panic_interface,
+ .pcode = &xe_display_pcode_interface,
.rpm = &xe_display_rpm_interface,
.stolen = &xe_display_stolen_interface,
};
diff --git a/drivers/gpu/drm/xe/display/intel_fb_bo.c b/drivers/gpu/drm/xe/display/xe_display_bo.c
index db8b1a27b4de..a689f71e7b14 100644
--- a/drivers/gpu/drm/xe/display/intel_fb_bo.c
+++ b/drivers/gpu/drm/xe/display/xe_display_bo.c
@@ -1,31 +1,28 @@
-/* SPDX-License-Identifier: MIT */
-/*
- * Copyright © 2021 Intel Corporation
- */
+// SPDX-License-Identifier: MIT
+/* Copyright © 2024 Intel Corporation */
-#include <drm/drm_modeset_helper.h>
-#include <drm/ttm/ttm_bo.h>
+#include <drm/drm_gem.h>
+#include <drm/intel/display_parent_interface.h>
-#include "intel_display_types.h"
#include "intel_fb.h"
-#include "intel_fb_bo.h"
#include "xe_bo.h"
+#include "xe_display_bo.h"
+#include "xe_pxp.h"
-void intel_fb_bo_framebuffer_fini(struct drm_gem_object *obj)
+static bool xe_display_bo_is_protected(struct drm_gem_object *obj)
+{
+ return xe_bo_is_protected(gem_to_xe_bo(obj));
+}
+
+static int xe_display_bo_read_from_page(struct drm_gem_object *obj, u64 offset, void *dst, int size)
{
struct xe_bo *bo = gem_to_xe_bo(obj);
- if (bo->flags & XE_BO_FLAG_PINNED) {
- /* Unpin our kernel fb first */
- xe_bo_lock(bo, false);
- xe_bo_unpin(bo);
- xe_bo_unlock(bo);
- }
- xe_bo_put(bo);
+ return xe_bo_read(bo, offset, dst, size);
}
-int intel_fb_bo_framebuffer_init(struct drm_gem_object *obj,
- struct drm_mode_fb_cmd2 *mode_cmd)
+static int xe_display_bo_framebuffer_init(struct drm_gem_object *obj,
+ struct drm_mode_fb_cmd2 *mode_cmd)
{
struct xe_bo *bo = gem_to_xe_bo(obj);
struct xe_device *xe = to_xe_device(bo->ttm.base.dev);
@@ -67,9 +64,23 @@ err:
return ret;
}
-struct drm_gem_object *intel_fb_bo_lookup_valid_bo(struct drm_device *drm,
- struct drm_file *filp,
- const struct drm_mode_fb_cmd2 *mode_cmd)
+static void xe_display_bo_framebuffer_fini(struct drm_gem_object *obj)
+{
+ struct xe_bo *bo = gem_to_xe_bo(obj);
+
+ if (bo->flags & XE_BO_FLAG_PINNED) {
+ /* Unpin our kernel fb first */
+ xe_bo_lock(bo, false);
+ xe_bo_unpin(bo);
+ xe_bo_unlock(bo);
+ }
+ xe_bo_put(bo);
+}
+
+static struct drm_gem_object *
+xe_display_bo_framebuffer_lookup(struct drm_device *drm,
+ struct drm_file *filp,
+ const struct drm_mode_fb_cmd2 *mode_cmd)
{
struct xe_device *xe = to_xe_device(drm);
struct xe_bo *bo;
@@ -89,3 +100,13 @@ struct drm_gem_object *intel_fb_bo_lookup_valid_bo(struct drm_device *drm,
return gem;
}
+
+const struct intel_display_bo_interface xe_display_bo_interface = {
+ .is_protected = xe_display_bo_is_protected,
+ .key_check = xe_pxp_obj_key_check,
+ .fb_mmap = drm_gem_prime_mmap,
+ .read_from_page = xe_display_bo_read_from_page,
+ .framebuffer_init = xe_display_bo_framebuffer_init,
+ .framebuffer_fini = xe_display_bo_framebuffer_fini,
+ .framebuffer_lookup = xe_display_bo_framebuffer_lookup,
+};
diff --git a/drivers/gpu/drm/xe/display/xe_display_bo.h b/drivers/gpu/drm/xe/display/xe_display_bo.h
new file mode 100644
index 000000000000..6879c104b0b1
--- /dev/null
+++ b/drivers/gpu/drm/xe/display/xe_display_bo.h
@@ -0,0 +1,9 @@
+/* SPDX-License-Identifier: MIT */
+/* Copyright © 2026 Intel Corporation */
+
+#ifndef __XE_DISPLAY_BO_H__
+#define __XE_DISPLAY_BO_H__
+
+extern const struct intel_display_bo_interface xe_display_bo_interface;
+
+#endif
diff --git a/drivers/gpu/drm/xe/display/xe_display_pcode.c b/drivers/gpu/drm/xe/display/xe_display_pcode.c
new file mode 100644
index 000000000000..f6820ef7e666
--- /dev/null
+++ b/drivers/gpu/drm/xe/display/xe_display_pcode.c
@@ -0,0 +1,38 @@
+// SPDX-License-Identifier: MIT
+/* Copyright © 2026 Intel Corporation */
+
+#include <drm/intel/display_parent_interface.h>
+
+#include "xe_device.h"
+#include "xe_pcode.h"
+
+static int xe_display_pcode_read(struct drm_device *drm, u32 mbox, u32 *val, u32 *val1)
+{
+ struct xe_device *xe = to_xe_device(drm);
+ struct xe_tile *tile = xe_device_get_root_tile(xe);
+
+ return xe_pcode_read(tile, mbox, val, val1);
+}
+
+static int xe_display_pcode_write_timeout(struct drm_device *drm, u32 mbox, u32 val, int timeout_ms)
+{
+ struct xe_device *xe = to_xe_device(drm);
+ struct xe_tile *tile = xe_device_get_root_tile(xe);
+
+ return xe_pcode_write_timeout(tile, mbox, val, timeout_ms);
+}
+
+static int xe_display_pcode_request(struct drm_device *drm, u32 mbox, u32 request,
+ u32 reply_mask, u32 reply, int timeout_base_ms)
+{
+ struct xe_device *xe = to_xe_device(drm);
+ struct xe_tile *tile = xe_device_get_root_tile(xe);
+
+ return xe_pcode_request(tile, mbox, request, reply_mask, reply, timeout_base_ms);
+}
+
+const struct intel_display_pcode_interface xe_display_pcode_interface = {
+ .read = xe_display_pcode_read,
+ .write = xe_display_pcode_write_timeout,
+ .request = xe_display_pcode_request,
+};
diff --git a/drivers/gpu/drm/xe/display/xe_display_pcode.h b/drivers/gpu/drm/xe/display/xe_display_pcode.h
new file mode 100644
index 000000000000..58bd2fb7fb79
--- /dev/null
+++ b/drivers/gpu/drm/xe/display/xe_display_pcode.h
@@ -0,0 +1,9 @@
+/* SPDX-License-Identifier: MIT */
+/* Copyright © 2026 Intel Corporation */
+
+#ifndef __XE_DISPLAY_PCODE_H__
+#define __XE_DISPLAY_PCODE_H__
+
+extern const struct intel_display_pcode_interface xe_display_pcode_interface;
+
+#endif
diff --git a/drivers/gpu/drm/xe/display/xe_display_vma.h b/drivers/gpu/drm/xe/display/xe_display_vma.h
new file mode 100644
index 000000000000..28267be61ae0
--- /dev/null
+++ b/drivers/gpu/drm/xe/display/xe_display_vma.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: MIT */
+/* Copyright © 2026 Intel Corporation */
+
+#ifndef _XE_DISPLAY_VMA_H_
+#define _XE_DISPLAY_VMA_H_
+
+#include <linux/refcount.h>
+
+struct xe_bo;
+struct xe_ggtt_node;
+
+struct i915_vma {
+ refcount_t ref;
+ struct xe_bo *bo, *dpt;
+ struct xe_ggtt_node *node;
+};
+
+#endif
diff --git a/drivers/gpu/drm/xe/display/xe_dsb_buffer.c b/drivers/gpu/drm/xe/display/xe_dsb_buffer.c
index 8ffc13855ef7..1c67a950c6ad 100644
--- a/drivers/gpu/drm/xe/display/xe_dsb_buffer.c
+++ b/drivers/gpu/drm/xe/display/xe_dsb_buffer.c
@@ -3,10 +3,12 @@
* Copyright 2023, Intel Corporation.
*/
-#include "intel_dsb_buffer.h"
+#include <drm/intel/display_parent_interface.h>
+
#include "xe_bo.h"
#include "xe_device.h"
#include "xe_device_types.h"
+#include "xe_dsb_buffer.h"
struct intel_dsb_buffer {
u32 *cmd_buf;
@@ -14,29 +16,29 @@ struct intel_dsb_buffer {
size_t buf_size;
};
-u32 intel_dsb_buffer_ggtt_offset(struct intel_dsb_buffer *dsb_buf)
+static u32 xe_dsb_buffer_ggtt_offset(struct intel_dsb_buffer *dsb_buf)
{
return xe_bo_ggtt_addr(dsb_buf->bo);
}
-void intel_dsb_buffer_write(struct intel_dsb_buffer *dsb_buf, u32 idx, u32 val)
+static void xe_dsb_buffer_write(struct intel_dsb_buffer *dsb_buf, u32 idx, u32 val)
{
iosys_map_wr(&dsb_buf->bo->vmap, idx * 4, u32, val);
}
-u32 intel_dsb_buffer_read(struct intel_dsb_buffer *dsb_buf, u32 idx)
+static u32 xe_dsb_buffer_read(struct intel_dsb_buffer *dsb_buf, u32 idx)
{
return iosys_map_rd(&dsb_buf->bo->vmap, idx * 4, u32);
}
-void intel_dsb_buffer_memset(struct intel_dsb_buffer *dsb_buf, u32 idx, u32 val, size_t size)
+static void xe_dsb_buffer_fill(struct intel_dsb_buffer *dsb_buf, u32 idx, u32 val, size_t size)
{
WARN_ON(idx > (dsb_buf->buf_size - size) / sizeof(*dsb_buf->cmd_buf));
iosys_map_memset(&dsb_buf->bo->vmap, idx * 4, val, size);
}
-struct intel_dsb_buffer *intel_dsb_buffer_create(struct drm_device *drm, size_t size)
+static struct intel_dsb_buffer *xe_dsb_buffer_create(struct drm_device *drm, size_t size)
{
struct xe_device *xe = to_xe_device(drm);
struct intel_dsb_buffer *dsb_buf;
@@ -69,13 +71,13 @@ err_pin_map:
return ERR_PTR(ret);
}
-void intel_dsb_buffer_cleanup(struct intel_dsb_buffer *dsb_buf)
+static void xe_dsb_buffer_cleanup(struct intel_dsb_buffer *dsb_buf)
{
xe_bo_unpin_map_no_vm(dsb_buf->bo);
kfree(dsb_buf);
}
-void intel_dsb_buffer_flush_map(struct intel_dsb_buffer *dsb_buf)
+static void xe_dsb_buffer_flush_map(struct intel_dsb_buffer *dsb_buf)
{
struct xe_device *xe = dsb_buf->bo->tile->xe;
@@ -86,3 +88,13 @@ void intel_dsb_buffer_flush_map(struct intel_dsb_buffer *dsb_buf)
xe_device_wmb(xe);
xe_device_l2_flush(xe);
}
+
+const struct intel_display_dsb_interface xe_display_dsb_interface = {
+ .ggtt_offset = xe_dsb_buffer_ggtt_offset,
+ .write = xe_dsb_buffer_write,
+ .read = xe_dsb_buffer_read,
+ .fill = xe_dsb_buffer_fill,
+ .create = xe_dsb_buffer_create,
+ .cleanup = xe_dsb_buffer_cleanup,
+ .flush_map = xe_dsb_buffer_flush_map,
+};
diff --git a/drivers/gpu/drm/xe/display/xe_dsb_buffer.h b/drivers/gpu/drm/xe/display/xe_dsb_buffer.h
new file mode 100644
index 000000000000..2e4772187016
--- /dev/null
+++ b/drivers/gpu/drm/xe/display/xe_dsb_buffer.h
@@ -0,0 +1,9 @@
+/* SPDX-License-Identifier: MIT */
+/* Copyright © 2026 Intel Corporation */
+
+#ifndef __XE_DSB_BUFFER_H__
+#define __XE_DSB_BUFFER_H__
+
+extern const struct intel_display_dsb_interface xe_display_dsb_interface;
+
+#endif
diff --git a/drivers/gpu/drm/xe/display/xe_fb_pin.c b/drivers/gpu/drm/xe/display/xe_fb_pin.c
index e1d29b6ba043..dbbc61032b7f 100644
--- a/drivers/gpu/drm/xe/display/xe_fb_pin.c
+++ b/drivers/gpu/drm/xe/display/xe_fb_pin.c
@@ -5,15 +5,14 @@
#include <drm/ttm/ttm_bo.h>
-#include "i915_vma.h"
#include "intel_display_core.h"
#include "intel_display_types.h"
-#include "intel_dpt.h"
#include "intel_fb.h"
#include "intel_fb_pin.h"
#include "intel_fbdev.h"
#include "xe_bo.h"
#include "xe_device.h"
+#include "xe_display_vma.h"
#include "xe_ggtt.h"
#include "xe_pm.h"
#include "xe_vram_types.h"
@@ -409,7 +408,7 @@ found:
refcount_inc(&vma->ref);
new_plane_state->ggtt_vma = vma;
- new_plane_state->surf = i915_ggtt_offset(new_plane_state->ggtt_vma) +
+ new_plane_state->surf = xe_ggtt_node_addr(new_plane_state->ggtt_vma->node) +
plane->surf_offset(new_plane_state);
return true;
@@ -439,7 +438,7 @@ int intel_plane_pin_fb(struct intel_plane_state *new_plane_state,
new_plane_state->ggtt_vma = vma;
- new_plane_state->surf = i915_ggtt_offset(new_plane_state->ggtt_vma) +
+ new_plane_state->surf = xe_ggtt_node_addr(new_plane_state->ggtt_vma->node) +
plane->surf_offset(new_plane_state);
return 0;
@@ -451,25 +450,6 @@ void intel_plane_unpin_fb(struct intel_plane_state *old_plane_state)
old_plane_state->ggtt_vma = NULL;
}
-/*
- * For Xe introduce dummy intel_dpt_create which just return NULL,
- * intel_dpt_destroy which does nothing, and fake intel_dpt_ofsset returning 0;
- */
-struct i915_address_space *intel_dpt_create(struct intel_framebuffer *fb)
-{
- return NULL;
-}
-
-void intel_dpt_destroy(struct i915_address_space *vm)
-{
- return;
-}
-
-u64 intel_dpt_offset(struct i915_vma *dpt_vma)
-{
- return 0;
-}
-
void intel_fb_get_map(struct i915_vma *vma, struct iosys_map *map)
{
*map = vma->bo->vmap;
diff --git a/drivers/gpu/drm/xe/display/xe_frontbuffer.c b/drivers/gpu/drm/xe/display/xe_frontbuffer.c
new file mode 100644
index 000000000000..113fc017ee94
--- /dev/null
+++ b/drivers/gpu/drm/xe/display/xe_frontbuffer.c
@@ -0,0 +1,71 @@
+// SPDX-License-Identifier: MIT
+/* Copyright © 2026 Intel Corporation */
+
+#include <drm/drm_gem.h>
+#include <drm/intel/display_parent_interface.h>
+
+#include "intel_frontbuffer.h"
+#include "xe_frontbuffer.h"
+
/*
 * xe wrapper around the display core's intel_frontbuffer: pairs the base
 * object with the backing GEM object (held with a reference) and an
 * independent refcount controlling the wrapper's lifetime.
 */
struct xe_frontbuffer {
	struct intel_frontbuffer base;	/* must stay embedded first-class: container_of() in ref/put */
	struct drm_gem_object *obj;	/* GEM object this frontbuffer tracks; ref held until release */
	struct kref ref;		/* lifetime of this wrapper */
};
+
/*
 * Allocate and initialise a frontbuffer wrapper for @obj.
 *
 * Takes a GEM reference on @obj for the lifetime of the wrapper (dropped in
 * frontbuffer_release()).  Returns the embedded intel_frontbuffer base, or
 * NULL on allocation failure.
 *
 * NOTE(review): kmalloc_obj(*front) is used with a single argument and no
 * explicit GFP flags — confirm the helper's signature and that GFP_KERNEL
 * context is acceptable for all callers.
 * NOTE(review): no per-object lookup/reuse is visible here — every call
 * allocates a fresh wrapper; confirm callers expect that rather than the
 * one-frontbuffer-per-object model.
 */
static struct intel_frontbuffer *xe_frontbuffer_get(struct drm_gem_object *obj)
{
	struct xe_frontbuffer *front;

	front = kmalloc_obj(*front);
	if (!front)
		return NULL;

	intel_frontbuffer_init(&front->base, obj->dev);

	kref_init(&front->ref);

	/* Pin the GEM object for as long as the frontbuffer exists. */
	drm_gem_object_get(obj);
	front->obj = obj;

	return &front->base;
}
+
/* Take an additional reference on the wrapper embedding @_front. */
static void xe_frontbuffer_ref(struct intel_frontbuffer *_front)
{
	struct xe_frontbuffer *front =
		container_of(_front, typeof(*front), base);

	kref_get(&front->ref);
}
+
/*
 * kref release callback: runs when the last reference taken via
 * xe_frontbuffer_ref()/xe_frontbuffer_get() is dropped.  Tears down the
 * base frontbuffer, releases the GEM reference taken at get time and frees
 * the wrapper.
 */
static void frontbuffer_release(struct kref *ref)
{
	struct xe_frontbuffer *front =
		container_of(ref, typeof(*front), ref);

	intel_frontbuffer_fini(&front->base);

	drm_gem_object_put(front->obj);

	kfree(front);
}
+
/* Drop a reference; frees the wrapper via frontbuffer_release() on zero. */
static void xe_frontbuffer_put(struct intel_frontbuffer *_front)
{
	struct xe_frontbuffer *front =
		container_of(_front, typeof(*front), base);

	kref_put(&front->ref, frontbuffer_release);
}
+
/*
 * Intentional no-op: the interface requires a flush_for_display callback,
 * but xe appears to need no extra work here.
 * NOTE(review): confirm xe really has nothing to flush on this path
 * (i915 performs cache/GTT flushing in its equivalent hook).
 */
static void xe_frontbuffer_flush_for_display(struct intel_frontbuffer *front)
{
}
+
/*
 * Frontbuffer ops exported to the shared display core; the display side
 * manages frontbuffer lifetimes exclusively through this vtable.
 */
const struct intel_display_frontbuffer_interface xe_display_frontbuffer_interface = {
	.get = xe_frontbuffer_get,
	.ref = xe_frontbuffer_ref,
	.put = xe_frontbuffer_put,
	.flush_for_display = xe_frontbuffer_flush_for_display,
};
diff --git a/drivers/gpu/drm/xe/display/xe_frontbuffer.h b/drivers/gpu/drm/xe/display/xe_frontbuffer.h
new file mode 100644
index 000000000000..6b4f59b42ade
--- /dev/null
+++ b/drivers/gpu/drm/xe/display/xe_frontbuffer.h
@@ -0,0 +1,9 @@
+/* SPDX-License-Identifier: MIT */
+/* Copyright © 2026 Intel Corporation */
+
+#ifndef _XE_FRONTBUFFER_H_
+#define _XE_FRONTBUFFER_H_
+
+extern const struct intel_display_frontbuffer_interface xe_display_frontbuffer_interface;
+
+#endif
diff --git a/drivers/gpu/drm/xe/display/xe_initial_plane.c b/drivers/gpu/drm/xe/display/xe_initial_plane.c
index 4cfeafcc158d..65cc0b0c934b 100644
--- a/drivers/gpu/drm/xe/display/xe_initial_plane.c
+++ b/drivers/gpu/drm/xe/display/xe_initial_plane.c
@@ -3,26 +3,21 @@
* Copyright © 2021 Intel Corporation
*/
-/* for ioread64 */
-#include <linux/io-64-nonatomic-lo-hi.h>
-
#include <drm/intel/display_parent_interface.h>
#include "regs/xe_gtt_defs.h"
-#include "xe_ggtt.h"
-#include "xe_mmio.h"
-#include "i915_vma.h"
#include "intel_crtc.h"
#include "intel_display_regs.h"
#include "intel_display_types.h"
#include "intel_fb.h"
#include "intel_fb_pin.h"
+#include "intel_fbdev_fb.h"
#include "xe_bo.h"
+#include "xe_display_vma.h"
+#include "xe_ggtt.h"
+#include "xe_mmio.h"
#include "xe_vram_types.h"
-#include "xe_wa.h"
-
-#include <generated/xe_device_wa_oob.h>
/* Early xe has no irq */
static void xe_initial_plane_vblank_wait(struct drm_crtc *_crtc)
@@ -90,17 +85,11 @@ initial_plane_bo(struct xe_device *xe,
phys_base = base;
flags |= XE_BO_FLAG_STOLEN;
- if (XE_DEVICE_WA(xe, 22019338487_display))
- return NULL;
-
- /*
- * If the FB is too big, just don't use it since fbdev is not very
- * important and we should probably use that space with FBC or other
- * features.
- */
if (IS_ENABLED(CONFIG_FRAMEBUFFER_CONSOLE) &&
- plane_config->size * 2 >> PAGE_SHIFT >= stolen->size)
+ !intel_fbdev_fb_prefer_stolen(&xe->drm, plane_config->size)) {
+ drm_info(&xe->drm, "Initial FB size exceeds half of stolen, discarding\n");
return NULL;
+ }
}
size = round_up(plane_config->base + plane_config->size,
@@ -170,7 +159,7 @@ xe_initial_plane_setup(struct drm_plane_state *_plane_state,
plane_state->ggtt_vma = vma;
- plane_state->surf = i915_ggtt_offset(plane_state->ggtt_vma);
+ plane_state->surf = xe_ggtt_node_addr(plane_state->ggtt_vma->node);
plane_config->vma = vma;
diff --git a/drivers/gpu/drm/xe/regs/xe_reg_defs.h b/drivers/gpu/drm/xe/regs/xe_reg_defs.h
index c39aab843e35..27ac0bf1f6cd 100644
--- a/drivers/gpu/drm/xe/regs/xe_reg_defs.h
+++ b/drivers/gpu/drm/xe/regs/xe_reg_defs.h
@@ -6,12 +6,13 @@
#ifndef _XE_REG_DEFS_H_
#define _XE_REG_DEFS_H_
+#include <drm/intel/pick.h>
+#include <drm/intel/reg_bits.h>
+
#include <linux/build_bug.h>
#include <linux/log2.h>
#include <linux/sizes.h>
-#include "compat-i915-headers/i915_reg_defs.h"
-
/**
* XE_REG_ADDR_MAX - The upper limit on MMIO register address
*
diff --git a/drivers/gpu/drm/xe/xe_eu_stall.c b/drivers/gpu/drm/xe/xe_eu_stall.c
index 39723928a019..c34408cfd292 100644
--- a/drivers/gpu/drm/xe/xe_eu_stall.c
+++ b/drivers/gpu/drm/xe/xe_eu_stall.c
@@ -442,9 +442,9 @@ static void clear_dropped_eviction_line_bit(struct xe_gt *gt, u16 group, u16 ins
* On Xe2 and later GPUs, the bit has to be cleared by writing 0 to it.
*/
if (GRAPHICS_VER(xe) >= 20)
- write_ptr_reg = _MASKED_BIT_DISABLE(XEHPC_EUSTALL_REPORT_OVERFLOW_DROP);
+ write_ptr_reg = REG_MASKED_FIELD_DISABLE(XEHPC_EUSTALL_REPORT_OVERFLOW_DROP);
else
- write_ptr_reg = _MASKED_BIT_ENABLE(XEHPC_EUSTALL_REPORT_OVERFLOW_DROP);
+ write_ptr_reg = REG_MASKED_FIELD_ENABLE(XEHPC_EUSTALL_REPORT_OVERFLOW_DROP);
xe_gt_mcr_unicast_write(gt, XEHPC_EUSTALL_REPORT, write_ptr_reg, group, instance);
}
@@ -504,7 +504,7 @@ static int xe_eu_stall_data_buf_read(struct xe_eu_stall_data_stream *stream,
/* Read pointer can overflow into one additional bit */
read_ptr &= (buf_size << 1) - 1;
read_ptr_reg = REG_FIELD_PREP(XEHPC_EUSTALL_REPORT1_READ_PTR_MASK, (read_ptr >> 6));
- read_ptr_reg = _MASKED_FIELD(XEHPC_EUSTALL_REPORT1_READ_PTR_MASK, read_ptr_reg);
+ read_ptr_reg = REG_MASKED_FIELD(XEHPC_EUSTALL_REPORT1_READ_PTR_MASK, read_ptr_reg);
xe_gt_mcr_unicast_write(gt, XEHPC_EUSTALL_REPORT1, read_ptr_reg, group, instance);
xecore_buf->read = read_ptr;
trace_xe_eu_stall_data_read(group, instance, read_ptr, write_ptr,
@@ -674,7 +674,7 @@ static int xe_eu_stall_stream_enable(struct xe_eu_stall_data_stream *stream)
if (XE_GT_WA(gt, 22016596838))
xe_gt_mcr_multicast_write(gt, ROW_CHICKEN2,
- _MASKED_BIT_ENABLE(DISABLE_DOP_GATING));
+ REG_MASKED_FIELD_ENABLE(DISABLE_DOP_GATING));
for_each_dss_steering(xecore, gt, group, instance) {
write_ptr_reg = xe_gt_mcr_unicast_read(gt, XEHPC_EUSTALL_REPORT, group, instance);
@@ -683,7 +683,7 @@ static int xe_eu_stall_stream_enable(struct xe_eu_stall_data_stream *stream)
clear_dropped_eviction_line_bit(gt, group, instance);
write_ptr = REG_FIELD_GET(XEHPC_EUSTALL_REPORT_WRITE_PTR_MASK, write_ptr_reg);
read_ptr_reg = REG_FIELD_PREP(XEHPC_EUSTALL_REPORT1_READ_PTR_MASK, write_ptr);
- read_ptr_reg = _MASKED_FIELD(XEHPC_EUSTALL_REPORT1_READ_PTR_MASK, read_ptr_reg);
+ read_ptr_reg = REG_MASKED_FIELD(XEHPC_EUSTALL_REPORT1_READ_PTR_MASK, read_ptr_reg);
/* Initialize the read pointer to the write pointer */
xe_gt_mcr_unicast_write(gt, XEHPC_EUSTALL_REPORT1, read_ptr_reg, group, instance);
write_ptr <<= 6;
@@ -695,10 +695,10 @@ static int xe_eu_stall_stream_enable(struct xe_eu_stall_data_stream *stream)
stream->data_drop.reported_to_user = false;
bitmap_zero(stream->data_drop.mask, XE_MAX_DSS_FUSE_BITS);
- reg_value = _MASKED_FIELD(EUSTALL_MOCS | EUSTALL_SAMPLE_RATE,
- REG_FIELD_PREP(EUSTALL_MOCS, gt->mocs.uc_index << 1) |
- REG_FIELD_PREP(EUSTALL_SAMPLE_RATE,
- stream->sampling_rate_mult));
+ reg_value = REG_MASKED_FIELD(EUSTALL_MOCS | EUSTALL_SAMPLE_RATE,
+ REG_FIELD_PREP(EUSTALL_MOCS, gt->mocs.uc_index << 1) |
+ REG_FIELD_PREP(EUSTALL_SAMPLE_RATE,
+ stream->sampling_rate_mult));
xe_gt_mcr_multicast_write(gt, XEHPC_EUSTALL_CTRL, reg_value);
/* GGTT addresses can never be > 32 bits */
xe_gt_mcr_multicast_write(gt, XEHPC_EUSTALL_BASE_UPPER, 0);
@@ -830,7 +830,7 @@ static int xe_eu_stall_disable_locked(struct xe_eu_stall_data_stream *stream)
if (XE_GT_WA(gt, 22016596838))
xe_gt_mcr_multicast_write(gt, ROW_CHICKEN2,
- _MASKED_BIT_DISABLE(DISABLE_DOP_GATING));
+ REG_MASKED_FIELD_DISABLE(DISABLE_DOP_GATING));
xe_force_wake_put(gt_to_fw(gt), stream->fw_ref);
xe_pm_runtime_put(gt_to_xe(gt));
diff --git a/drivers/gpu/drm/xe/xe_execlist.c b/drivers/gpu/drm/xe/xe_execlist.c
index 7e8a3a7db741..755a2bff5d7b 100644
--- a/drivers/gpu/drm/xe/xe_execlist.c
+++ b/drivers/gpu/drm/xe/xe_execlist.c
@@ -47,7 +47,7 @@ static void __start_lrc(struct xe_hw_engine *hwe, struct xe_lrc *lrc,
struct xe_mmio *mmio = &gt->mmio;
struct xe_device *xe = gt_to_xe(gt);
u64 lrc_desc;
- u32 ring_mode = _MASKED_BIT_ENABLE(GFX_DISABLE_LEGACY_MODE);
+ u32 ring_mode = REG_MASKED_FIELD_ENABLE(GFX_DISABLE_LEGACY_MODE);
lrc_desc = xe_lrc_descriptor(lrc);
@@ -61,7 +61,7 @@ static void __start_lrc(struct xe_hw_engine *hwe, struct xe_lrc *lrc,
if (hwe->class == XE_ENGINE_CLASS_COMPUTE)
xe_mmio_write32(mmio, RCU_MODE,
- _MASKED_BIT_ENABLE(RCU_MODE_CCS_ENABLE));
+ REG_MASKED_FIELD_ENABLE(RCU_MODE_CCS_ENABLE));
xe_lrc_write_ctx_reg(lrc, CTX_RING_TAIL, lrc->ring.tail);
lrc->ring.old_tail = lrc->ring.tail;
@@ -83,7 +83,7 @@ static void __start_lrc(struct xe_hw_engine *hwe, struct xe_lrc *lrc,
xe_mmio_read32(mmio, RING_HWS_PGA(hwe->mmio_base));
if (xe_device_has_msix(gt_to_xe(hwe->gt)))
- ring_mode |= _MASKED_BIT_ENABLE(GFX_MSIX_INTERRUPT_ENABLE);
+ ring_mode |= REG_MASKED_FIELD_ENABLE(GFX_MSIX_INTERRUPT_ENABLE);
xe_mmio_write32(mmio, RING_MODE(hwe->mmio_base), ring_mode);
xe_mmio_write32(mmio, RING_EXECLIST_SQ_CONTENTS_LO(hwe->mmio_base),
diff --git a/drivers/gpu/drm/xe/xe_hw_engine.c b/drivers/gpu/drm/xe/xe_hw_engine.c
index ea3ad600d7c7..337baf0a6e87 100644
--- a/drivers/gpu/drm/xe/xe_hw_engine.c
+++ b/drivers/gpu/drm/xe/xe_hw_engine.c
@@ -327,21 +327,21 @@ void xe_hw_engine_enable_ring(struct xe_hw_engine *hwe)
{
u32 ccs_mask =
xe_hw_engine_mask_per_class(hwe->gt, XE_ENGINE_CLASS_COMPUTE);
- u32 ring_mode = _MASKED_BIT_ENABLE(GFX_DISABLE_LEGACY_MODE);
+ u32 ring_mode = REG_MASKED_FIELD_ENABLE(GFX_DISABLE_LEGACY_MODE);
if (hwe->class == XE_ENGINE_CLASS_COMPUTE && ccs_mask)
xe_mmio_write32(&hwe->gt->mmio, RCU_MODE,
- _MASKED_BIT_ENABLE(RCU_MODE_CCS_ENABLE));
+ REG_MASKED_FIELD_ENABLE(RCU_MODE_CCS_ENABLE));
xe_hw_engine_mmio_write32(hwe, RING_HWSTAM(0), ~0x0);
xe_hw_engine_mmio_write32(hwe, RING_HWS_PGA(0),
xe_bo_ggtt_addr(hwe->hwsp));
if (xe_device_has_msix(gt_to_xe(hwe->gt)))
- ring_mode |= _MASKED_BIT_ENABLE(GFX_MSIX_INTERRUPT_ENABLE);
+ ring_mode |= REG_MASKED_FIELD_ENABLE(GFX_MSIX_INTERRUPT_ENABLE);
xe_hw_engine_mmio_write32(hwe, RING_MODE(0), ring_mode);
xe_hw_engine_mmio_write32(hwe, RING_MI_MODE(0),
- _MASKED_BIT_DISABLE(STOP_RING));
+ REG_MASKED_FIELD_DISABLE(STOP_RING));
xe_hw_engine_mmio_read32(hwe, RING_MI_MODE(0));
}
diff --git a/drivers/gpu/drm/xe/xe_lrc.c b/drivers/gpu/drm/xe/xe_lrc.c
index 35b365ac55e5..c5cfd8f75a94 100644
--- a/drivers/gpu/drm/xe/xe_lrc.c
+++ b/drivers/gpu/drm/xe/xe_lrc.c
@@ -642,12 +642,12 @@ static const u8 *reg_offsets(struct xe_device *xe, enum xe_engine_class class)
static void set_context_control(u32 *regs, struct xe_hw_engine *hwe)
{
- regs[CTX_CONTEXT_CONTROL] = _MASKED_BIT_ENABLE(CTX_CTRL_INHIBIT_SYN_CTX_SWITCH |
- CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT);
+ regs[CTX_CONTEXT_CONTROL] = REG_MASKED_FIELD_ENABLE(CTX_CTRL_INHIBIT_SYN_CTX_SWITCH |
+ CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT);
if (xe_gt_has_indirect_ring_state(hwe->gt))
regs[CTX_CONTEXT_CONTROL] |=
- _MASKED_BIT_ENABLE(CTX_CTRL_INDIRECT_RING_STATE_ENABLE);
+ REG_MASKED_FIELD_ENABLE(CTX_CTRL_INDIRECT_RING_STATE_ENABLE);
}
static void set_memory_based_intr(u32 *regs, struct xe_hw_engine *hwe)
@@ -1212,7 +1212,7 @@ static ssize_t setup_invalidate_state_cache_wa(struct xe_lrc *lrc,
*cmd++ = MI_LOAD_REGISTER_IMM | MI_LRI_NUM_REGS(1);
*cmd++ = CS_DEBUG_MODE2(0).addr;
- *cmd++ = _MASKED_BIT_ENABLE(INSTRUCTION_STATE_CACHE_INVALIDATE);
+ *cmd++ = REG_MASKED_FIELD_ENABLE(INSTRUCTION_STATE_CACHE_INVALIDATE);
return cmd - batch;
}
@@ -1515,12 +1515,12 @@ static int xe_lrc_ctx_init(struct xe_lrc *lrc, struct xe_hw_engine *hwe, struct
if (init_flags & XE_LRC_CREATE_RUNALONE)
xe_lrc_write_ctx_reg(lrc, CTX_CONTEXT_CONTROL,
xe_lrc_read_ctx_reg(lrc, CTX_CONTEXT_CONTROL) |
- _MASKED_BIT_ENABLE(CTX_CTRL_RUN_ALONE));
+ REG_MASKED_FIELD_ENABLE(CTX_CTRL_RUN_ALONE));
if (init_flags & XE_LRC_CREATE_PXP)
xe_lrc_write_ctx_reg(lrc, CTX_CONTEXT_CONTROL,
xe_lrc_read_ctx_reg(lrc, CTX_CONTEXT_CONTROL) |
- _MASKED_BIT_ENABLE(CTX_CTRL_PXP_ENABLE));
+ REG_MASKED_FIELD_ENABLE(CTX_CTRL_PXP_ENABLE));
lrc->ctx_timestamp = 0;
xe_lrc_write_ctx_reg(lrc, CTX_TIMESTAMP, 0);
@@ -1551,7 +1551,7 @@ static int xe_lrc_ctx_init(struct xe_lrc *lrc, struct xe_hw_engine *hwe, struct
if (init_flags & XE_LRC_DISABLE_STATE_CACHE_PERF_FIX) {
state_cache_perf_fix[0] = MI_LOAD_REGISTER_IMM | MI_LRI_NUM_REGS(1);
state_cache_perf_fix[1] = COMMON_SLICE_CHICKEN3.addr;
- state_cache_perf_fix[2] = _MASKED_BIT_ENABLE(DISABLE_STATE_CACHE_PERF_FIX);
+ state_cache_perf_fix[2] = REG_MASKED_FIELD_ENABLE(DISABLE_STATE_CACHE_PERF_FIX);
xe_lrc_write_ring(lrc, state_cache_perf_fix, sizeof(state_cache_perf_fix));
}
diff --git a/drivers/gpu/drm/xe/xe_mmio.c b/drivers/gpu/drm/xe/xe_mmio.c
index a1a05c68dc7d..78adb303b663 100644
--- a/drivers/gpu/drm/xe/xe_mmio.c
+++ b/drivers/gpu/drm/xe/xe_mmio.c
@@ -154,6 +154,15 @@ u8 xe_mmio_read8(struct xe_mmio *mmio, struct xe_reg reg)
return val;
}
/*
 * Write an 8-bit value to @reg, mirroring the existing wider accessors:
 * adjust the address for this MMIO view, trace the access, then write.
 */
void xe_mmio_write8(struct xe_mmio *mmio, struct xe_reg reg, u8 val)
{
	u32 addr = xe_mmio_adjusted_addr(mmio, reg.addr);

	/* second argument 'true' marks this as a write in the trace */
	trace_xe_reg_rw(mmio, true, addr, val, sizeof(val));

	writeb(val, mmio->regs + addr);
}
+
u16 xe_mmio_read16(struct xe_mmio *mmio, struct xe_reg reg)
{
u32 addr = xe_mmio_adjusted_addr(mmio, reg.addr);
diff --git a/drivers/gpu/drm/xe/xe_mmio.h b/drivers/gpu/drm/xe/xe_mmio.h
index 41ae720acbc3..befe021f2215 100644
--- a/drivers/gpu/drm/xe/xe_mmio.h
+++ b/drivers/gpu/drm/xe/xe_mmio.h
@@ -17,6 +17,7 @@ int xe_mmio_probe_tiles(struct xe_device *xe);
void xe_mmio_init(struct xe_mmio *mmio, struct xe_tile *tile, void __iomem *ptr, u32 size);
u8 xe_mmio_read8(struct xe_mmio *mmio, struct xe_reg reg);
+void xe_mmio_write8(struct xe_mmio *mmio, struct xe_reg reg, u8 val);
u16 xe_mmio_read16(struct xe_mmio *mmio, struct xe_reg reg);
void xe_mmio_write32(struct xe_mmio *mmio, struct xe_reg reg, u32 val);
u32 xe_mmio_read32(struct xe_mmio *mmio, struct xe_reg reg);
diff --git a/drivers/gpu/drm/xe/xe_oa.c b/drivers/gpu/drm/xe/xe_oa.c
index 2efc16c3a95d..6337e671c97a 100644
--- a/drivers/gpu/drm/xe/xe_oa.c
+++ b/drivers/gpu/drm/xe/xe_oa.c
@@ -757,8 +757,9 @@ static int xe_oa_configure_oar_context(struct xe_oa_stream *stream, bool enable)
},
{
RING_CONTEXT_CONTROL(stream->hwe->mmio_base),
- _MASKED_FIELD(CTX_CTRL_OAC_CONTEXT_ENABLE,
- enable ? CTX_CTRL_OAC_CONTEXT_ENABLE : 0)
+ enable ?
+ REG_MASKED_FIELD_ENABLE(CTX_CTRL_OAC_CONTEXT_ENABLE) :
+ REG_MASKED_FIELD_DISABLE(CTX_CTRL_OAC_CONTEXT_ENABLE)
},
};
@@ -781,9 +782,9 @@ static int xe_oa_configure_oac_context(struct xe_oa_stream *stream, bool enable)
},
{
RING_CONTEXT_CONTROL(stream->hwe->mmio_base),
- _MASKED_FIELD(CTX_CTRL_OAC_CONTEXT_ENABLE,
- enable ? CTX_CTRL_OAC_CONTEXT_ENABLE : 0) |
- _MASKED_FIELD(CTX_CTRL_RUN_ALONE, enable ? CTX_CTRL_RUN_ALONE : 0),
+ enable ?
+ REG_MASKED_FIELD_ENABLE(CTX_CTRL_OAC_CONTEXT_ENABLE | CTX_CTRL_RUN_ALONE) :
+ REG_MASKED_FIELD_DISABLE(CTX_CTRL_OAC_CONTEXT_ENABLE | CTX_CTRL_RUN_ALONE),
},
};
@@ -811,9 +812,10 @@ static int xe_oa_configure_oa_context(struct xe_oa_stream *stream, bool enable)
/*
 * Build the masked-write value controlling MMIO-triggered OA reports.
 * Triggering is only permitted while the stream exists, is being enabled,
 * and the user requested OA samples; otherwise the DISABLE bit is set.
 */
static u32 oag_configure_mmio_trigger(const struct xe_oa_stream *stream, bool enable)
{
	bool allow_trigger = enable && stream && stream->sample;

	return allow_trigger ?
	       REG_MASKED_FIELD_DISABLE(OAG_OA_DEBUG_DISABLE_MMIO_TRG) :
	       REG_MASKED_FIELD_ENABLE(OAG_OA_DEBUG_DISABLE_MMIO_TRG);
}
static void xe_oa_disable_metric_set(struct xe_oa_stream *stream)
@@ -824,9 +826,9 @@ static void xe_oa_disable_metric_set(struct xe_oa_stream *stream)
/* Enable thread stall DOP gating and EU DOP gating. */
if (XE_GT_WA(stream->gt, 1508761755)) {
xe_gt_mcr_multicast_write(stream->gt, ROW_CHICKEN,
- _MASKED_BIT_DISABLE(STALL_DOP_GATING_DISABLE));
+ REG_MASKED_FIELD_DISABLE(STALL_DOP_GATING_DISABLE));
xe_gt_mcr_multicast_write(stream->gt, ROW_CHICKEN2,
- _MASKED_BIT_DISABLE(DISABLE_DOP_GATING));
+ REG_MASKED_FIELD_DISABLE(DISABLE_DOP_GATING));
}
xe_mmio_write32(mmio, __oa_regs(stream)->oa_debug,
@@ -1054,16 +1056,18 @@ exit:
static u32 oag_report_ctx_switches(const struct xe_oa_stream *stream)
{
/* If user didn't require OA reports, ask HW not to emit ctx switch reports */
- return _MASKED_FIELD(OAG_OA_DEBUG_DISABLE_CTX_SWITCH_REPORTS,
- stream->sample ?
- 0 : OAG_OA_DEBUG_DISABLE_CTX_SWITCH_REPORTS);
+ if (stream->sample)
+ return REG_MASKED_FIELD_DISABLE(OAG_OA_DEBUG_DISABLE_CTX_SWITCH_REPORTS);
+ else
+ return REG_MASKED_FIELD_ENABLE(OAG_OA_DEBUG_DISABLE_CTX_SWITCH_REPORTS);
}
static u32 oag_buf_size_select(const struct xe_oa_stream *stream)
{
- return _MASKED_FIELD(OAG_OA_DEBUG_BUF_SIZE_SELECT,
- xe_bo_size(stream->oa_buffer.bo) > SZ_16M ?
- OAG_OA_DEBUG_BUF_SIZE_SELECT : 0);
+ if (xe_bo_size(stream->oa_buffer.bo) > SZ_16M)
+ return REG_MASKED_FIELD_ENABLE(OAG_OA_DEBUG_BUF_SIZE_SELECT);
+ else
+ return REG_MASKED_FIELD_DISABLE(OAG_OA_DEBUG_BUF_SIZE_SELECT);
}
static int xe_oa_enable_metric_set(struct xe_oa_stream *stream)
@@ -1078,9 +1082,9 @@ static int xe_oa_enable_metric_set(struct xe_oa_stream *stream)
*/
if (XE_GT_WA(stream->gt, 1508761755)) {
xe_gt_mcr_multicast_write(stream->gt, ROW_CHICKEN,
- _MASKED_BIT_ENABLE(STALL_DOP_GATING_DISABLE));
+ REG_MASKED_FIELD_ENABLE(STALL_DOP_GATING_DISABLE));
xe_gt_mcr_multicast_write(stream->gt, ROW_CHICKEN2,
- _MASKED_BIT_ENABLE(DISABLE_DOP_GATING));
+ REG_MASKED_FIELD_ENABLE(DISABLE_DOP_GATING));
}
/* Disable clk ratio reports */
@@ -1095,7 +1099,7 @@ static int xe_oa_enable_metric_set(struct xe_oa_stream *stream)
OAG_OA_DEBUG_DISABLE_START_TRG_1_COUNT_QUAL;
xe_mmio_write32(mmio, __oa_regs(stream)->oa_debug,
- _MASKED_BIT_ENABLE(oa_debug) |
+ REG_MASKED_FIELD_ENABLE(oa_debug) |
oag_report_ctx_switches(stream) |
oag_buf_size_select(stream) |
oag_configure_mmio_trigger(stream, true));
diff --git a/drivers/gpu/drm/xe/xe_pcode.c b/drivers/gpu/drm/xe/xe_pcode.c
index 0d33c14ea0cf..dc66d0c7ee06 100644
--- a/drivers/gpu/drm/xe/xe_pcode.c
+++ b/drivers/gpu/drm/xe/xe_pcode.c
@@ -348,33 +348,3 @@ int xe_pcode_probe_early(struct xe_device *xe)
return xe_pcode_ready(xe, false);
}
ALLOW_ERROR_INJECTION(xe_pcode_probe_early, ERRNO); /* See xe_pci_probe */
-
-/* Helpers with drm device. These should only be called by the display side */
-#if IS_ENABLED(CONFIG_DRM_XE_DISPLAY)
-
-int intel_pcode_read(struct drm_device *drm, u32 mbox, u32 *val, u32 *val1)
-{
- struct xe_device *xe = to_xe_device(drm);
- struct xe_tile *tile = xe_device_get_root_tile(xe);
-
- return xe_pcode_read(tile, mbox, val, val1);
-}
-
-int intel_pcode_write_timeout(struct drm_device *drm, u32 mbox, u32 val, int timeout_ms)
-{
- struct xe_device *xe = to_xe_device(drm);
- struct xe_tile *tile = xe_device_get_root_tile(xe);
-
- return xe_pcode_write_timeout(tile, mbox, val, timeout_ms);
-}
-
-int intel_pcode_request(struct drm_device *drm, u32 mbox, u32 request,
- u32 reply_mask, u32 reply, int timeout_base_ms)
-{
- struct xe_device *xe = to_xe_device(drm);
- struct xe_tile *tile = xe_device_get_root_tile(xe);
-
- return xe_pcode_request(tile, mbox, request, reply_mask, reply, timeout_base_ms);
-}
-
-#endif
diff --git a/drivers/gpu/drm/xe/xe_pcode.h b/drivers/gpu/drm/xe/xe_pcode.h
index a5584c1c75f9..490e4f269607 100644
--- a/drivers/gpu/drm/xe/xe_pcode.h
+++ b/drivers/gpu/drm/xe/xe_pcode.h
@@ -34,12 +34,4 @@ int xe_pcode_request(struct xe_tile *tile, u32 mbox, u32 request,
| FIELD_PREP(PCODE_MB_PARAM1, param1)\
| FIELD_PREP(PCODE_MB_PARAM2, param2))
-/* Helpers with drm device */
-int intel_pcode_read(struct drm_device *drm, u32 mbox, u32 *val, u32 *val1);
-int intel_pcode_write_timeout(struct drm_device *drm, u32 mbox, u32 val, int timeout_ms);
-#define intel_pcode_write(drm, mbox, val) \
- intel_pcode_write_timeout((drm), (mbox), (val), 1)
-int intel_pcode_request(struct drm_device *drm, u32 mbox, u32 request,
- u32 reply_mask, u32 reply, int timeout_base_ms);
-
#endif
diff --git a/drivers/gpu/drm/xe/xe_pxp.c b/drivers/gpu/drm/xe/xe_pxp.c
index d61446bf9c19..e2978e48f660 100644
--- a/drivers/gpu/drm/xe/xe_pxp.c
+++ b/drivers/gpu/drm/xe/xe_pxp.c
@@ -312,8 +312,8 @@ void xe_pxp_irq_handler(struct xe_device *xe, u16 iir)
static int kcr_pxp_set_status(const struct xe_pxp *pxp, bool enable)
{
- u32 val = enable ? _MASKED_BIT_ENABLE(KCR_INIT_ALLOW_DISPLAY_ME_WRITES) :
- _MASKED_BIT_DISABLE(KCR_INIT_ALLOW_DISPLAY_ME_WRITES);
+ u32 val = enable ? REG_MASKED_FIELD_ENABLE(KCR_INIT_ALLOW_DISPLAY_ME_WRITES) :
+ REG_MASKED_FIELD_DISABLE(KCR_INIT_ALLOW_DISPLAY_ME_WRITES);
CLASS(xe_force_wake, fw_ref)(gt_to_fw(pxp->gt), XE_FW_GT);
if (!xe_force_wake_ref_has_domain(fw_ref.domains, XE_FW_GT))
diff --git a/drivers/gpu/drm/xe/xe_uc_fw.c b/drivers/gpu/drm/xe/xe_uc_fw.c
index d35bc4989144..9cebb2490245 100644
--- a/drivers/gpu/drm/xe/xe_uc_fw.c
+++ b/drivers/gpu/drm/xe/xe_uc_fw.c
@@ -881,7 +881,7 @@ static int uc_fw_xfer(struct xe_uc_fw *uc_fw, u32 offset, u32 dma_flags)
/* Start the DMA */
xe_mmio_write32(mmio, DMA_CTRL,
- _MASKED_BIT_ENABLE(dma_flags | START_DMA));
+ REG_MASKED_FIELD_ENABLE(dma_flags | START_DMA));
/* Wait for DMA to finish */
ret = xe_mmio_wait32(mmio, DMA_CTRL, START_DMA, 0, 100000, &dma_ctrl,
@@ -891,7 +891,7 @@ static int uc_fw_xfer(struct xe_uc_fw *uc_fw, u32 offset, u32 dma_flags)
xe_uc_fw_type_repr(uc_fw->type), dma_ctrl);
/* Disable the bits once DMA is over */
- xe_mmio_write32(mmio, DMA_CTRL, _MASKED_BIT_DISABLE(dma_flags));
+ xe_mmio_write32(mmio, DMA_CTRL, REG_MASKED_FIELD_DISABLE(dma_flags));
return ret;
}
diff --git a/drivers/gpu/tests/gpu_buddy_test.c b/drivers/gpu/tests/gpu_buddy_test.c
index 5429010f34d3..7df5c2ae83bb 100644
--- a/drivers/gpu/tests/gpu_buddy_test.c
+++ b/drivers/gpu/tests/gpu_buddy_test.c
@@ -21,6 +21,170 @@ static inline u64 get_size(int order, u64 chunk_size)
return (1 << order) * chunk_size;
}
/*
 * Stress the buddy allocator's per-subtree maximum-alignment metadata.
 *
 * Phase 1: for each alignment in a strictly descending list (1M down to 8K),
 * allocate SZ_4K blocks at that offset alignment until the allocator refuses,
 * verifying every returned block really is aligned, then check that the root
 * of the highest-order non-empty free tree reports the expected
 * subtree_max_alignment.  Phase 2: free the per-alignment lists in reverse
 * order and check the maximum alignment observed across all free-tree roots
 * has grown back to at least the alignment just released.
 */
static void gpu_test_buddy_subtree_offset_alignment_stress(struct kunit *test)
{
	struct gpu_buddy_block *block;
	struct rb_node *node = NULL;
	const u64 mm_size = SZ_2M;
	/* Must stay in descending order: phase 1 relies on it. */
	const u64 alignments[] = {
		SZ_1M,
		SZ_512K,
		SZ_256K,
		SZ_128K,
		SZ_64K,
		SZ_32K,
		SZ_16K,
		SZ_8K,
	};
	/* One allocation list per alignment so they can be freed separately. */
	struct list_head allocated[ARRAY_SIZE(alignments)];
	unsigned int i, max_subtree_align = 0;
	int ret, tree, order;
	struct gpu_buddy mm;

	KUNIT_ASSERT_FALSE_MSG(test, gpu_buddy_init(&mm, mm_size, SZ_4K),
			       "buddy_init failed\n");

	for (i = 0; i < ARRAY_SIZE(allocated); i++)
		INIT_LIST_HEAD(&allocated[i]);

	/*
	 * Exercise subtree_max_alignment tracking by allocating blocks with descending
	 * alignment constraints and freeing them in reverse order. This verifies that
	 * free-tree augmentation correctly propagates the maximum offset alignment
	 * present in each subtree at every stage.
	 */

	for (i = 0; i < ARRAY_SIZE(alignments); i++) {
		struct gpu_buddy_block *root = NULL;
		unsigned int expected;
		u64 align;

		align = alignments[i];
		/*
		 * NOTE(review): 'ilog2(align) - 1' encodes the allocator's
		 * internal alignment-order convention — confirm against the
		 * gpu_buddy subtree_max_alignment definition.
		 */
		expected = ilog2(align) - 1;

		/* Allocate until the allocator runs out of aligned space. */
		for (;;) {
			ret = gpu_buddy_alloc_blocks(&mm,
						     0, mm_size,
						     SZ_4K, align,
						     &allocated[i],
						     0);
			if (ret)
				break;

			block = list_last_entry(&allocated[i],
						struct gpu_buddy_block,
						link);
			KUNIT_EXPECT_TRUE(test, IS_ALIGNED(gpu_buddy_block_offset(block), align));
		}

		/* Find the root of the highest-order non-empty free tree. */
		for (order = mm.max_order; order >= 0 && !root; order--) {
			for (tree = 0; tree < 2; tree++) {
				node = mm.free_trees[tree][order].rb_node;
				if (node) {
					root = container_of(node,
							    struct gpu_buddy_block,
							    rb);
					break;
				}
			}
		}

		KUNIT_ASSERT_NOT_NULL(test, root);
		KUNIT_EXPECT_EQ(test, root->subtree_max_alignment, expected);
	}

	/* Free in reverse (ascending alignment) order, re-checking metadata. */
	for (i = ARRAY_SIZE(alignments); i-- > 0; ) {
		gpu_buddy_free_list(&mm, &allocated[i], 0);

		/* Scan every free-tree root for the largest recorded alignment. */
		for (order = 0; order <= mm.max_order; order++) {
			for (tree = 0; tree < 2; tree++) {
				node = mm.free_trees[tree][order].rb_node;
				if (!node)
					continue;

				block = container_of(node, struct gpu_buddy_block, rb);
				max_subtree_align = max(max_subtree_align,
							block->subtree_max_alignment);
			}
		}

		KUNIT_EXPECT_GE(test, max_subtree_align, ilog2(alignments[i]));
	}

	gpu_buddy_fini(&mm);
}
+
/*
 * Verify offset-aligned allocation across different sizes and under
 * fragmentation.
 *
 * Fill a 4M mm with 8K blocks at 256K offset alignment, free them, refill
 * with 16K blocks at the same alignment, then free every other block and
 * reallocate into the gaps.  Finally confirm that one more aligned
 * allocation fails once all 256K-aligned slots are consumed.
 */
static void gpu_test_buddy_offset_aligned_allocation(struct kunit *test)
{
	struct gpu_buddy_block *block, *tmp;
	int num_blocks, i, count = 0;
	LIST_HEAD(allocated);
	struct gpu_buddy mm;
	u64 mm_size = SZ_4M;
	LIST_HEAD(freed);

	KUNIT_ASSERT_FALSE_MSG(test, gpu_buddy_init(&mm, mm_size, SZ_4K),
			       "buddy_init failed\n");

	/* One allocation per 256K-aligned slot in the mm. */
	num_blocks = mm_size / SZ_256K;
	/*
	 * Allocate multiple sizes under a fixed offset alignment.
	 * Ensures alignment handling is independent of allocation size and
	 * exercises subtree max-alignment pruning for small requests.
	 */
	for (i = 0; i < num_blocks; i++)
		KUNIT_ASSERT_FALSE_MSG(test, gpu_buddy_alloc_blocks(&mm, 0, mm_size, SZ_8K, SZ_256K,
								    &allocated, 0),
				       "buddy_alloc hit an error size=%u\n", SZ_8K);

	list_for_each_entry(block, &allocated, link) {
		/* Ensure the allocated block uses the expected 8 KB size */
		KUNIT_EXPECT_EQ(test, gpu_buddy_block_size(&mm, block), SZ_8K);
		/* Ensure the block starts at a 256 KB-aligned offset for proper alignment */
		KUNIT_EXPECT_TRUE(test, IS_ALIGNED(gpu_buddy_block_offset(block), SZ_256K));
	}
	gpu_buddy_free_list(&mm, &allocated, 0);

	/* Repeat with a different (16K) size to show alignment is size-independent. */
	for (i = 0; i < num_blocks; i++)
		KUNIT_ASSERT_FALSE_MSG(test, gpu_buddy_alloc_blocks(&mm, 0, mm_size, SZ_16K, SZ_256K,
								    &allocated, 0),
				       "buddy_alloc hit an error size=%u\n", SZ_16K);

	list_for_each_entry(block, &allocated, link) {
		/* Ensure the allocated block uses the expected 16 KB size */
		KUNIT_EXPECT_EQ(test, gpu_buddy_block_size(&mm, block), SZ_16K);
		/* Ensure the block starts at a 256 KB-aligned offset for proper alignment */
		KUNIT_EXPECT_TRUE(test, IS_ALIGNED(gpu_buddy_block_offset(block), SZ_256K));
	}

	/*
	 * Free alternating aligned blocks to introduce fragmentation.
	 * Ensures offset-aligned allocations remain valid after frees and
	 * verifies subtree max-alignment metadata is correctly maintained.
	 */
	list_for_each_entry_safe(block, tmp, &allocated, link) {
		if (count % 2 == 0)
			list_move_tail(&block->link, &freed);
		count++;
	}
	gpu_buddy_free_list(&mm, &freed, 0);

	/* Reallocate into the freed gaps; each must still come back aligned. */
	for (i = 0; i < num_blocks / 2; i++)
		KUNIT_ASSERT_FALSE_MSG(test, gpu_buddy_alloc_blocks(&mm, 0, mm_size, SZ_16K, SZ_256K,
								    &allocated, 0),
				       "buddy_alloc hit an error size=%u\n", SZ_16K);

	/*
	 * Allocate with offset alignment after all slots are used; must fail.
	 * Confirms that no aligned offsets remain.
	 */
	KUNIT_ASSERT_TRUE_MSG(test, gpu_buddy_alloc_blocks(&mm, 0, mm_size, SZ_16K, SZ_256K,
							   &allocated, 0),
			      "buddy_alloc hit an error size=%u\n", SZ_16K);
	gpu_buddy_free_list(&mm, &allocated, 0);
	gpu_buddy_fini(&mm);
}
+
static void gpu_test_buddy_fragmentation_performance(struct kunit *test)
{
struct gpu_buddy_block *block, *tmp;
@@ -362,6 +526,332 @@ static void gpu_test_buddy_alloc_range_bias(struct kunit *test)
gpu_buddy_fini(&mm);
}
+static void gpu_test_buddy_alloc_range(struct kunit *test)
+{
+ GPU_RND_STATE(prng, random_seed);
+ struct gpu_buddy_block *block;
+ struct gpu_buddy mm;
+ u32 mm_size, total;
+ LIST_HEAD(blocks);
+ LIST_HEAD(tmp);
+ u32 ps = SZ_4K;
+ int ret;
+
+ mm_size = SZ_16M;
+
+ KUNIT_ASSERT_FALSE_MSG(test, gpu_buddy_init(&mm, mm_size, ps),
+ "buddy_init failed\n");
+
+ /*
+ * Basic exact-range allocation.
+ * Allocate the entire mm as one exact range (start + size == end).
+ * This is the simplest case exercising __gpu_buddy_alloc_range.
+ */
+ ret = gpu_buddy_alloc_blocks(&mm, 0, mm_size, mm_size, ps, &blocks, 0);
+ KUNIT_ASSERT_EQ_MSG(test, ret, 0,
+ "exact-range alloc of full mm failed\n");
+
+ total = 0;
+ list_for_each_entry(block, &blocks, link) {
+ u64 offset = gpu_buddy_block_offset(block);
+ u64 bsize = gpu_buddy_block_size(&mm, block);
+
+ KUNIT_EXPECT_TRUE_MSG(test, offset + bsize <= (u64)mm_size,
+ "block [%llx, %llx) outside mm\n", offset, offset + bsize);
+ total += (u32)bsize;
+ }
+ KUNIT_EXPECT_EQ(test, total, mm_size);
+ KUNIT_EXPECT_EQ(test, mm.avail, 0ULL);
+
+ /* Full mm should be exhausted */
+ ret = gpu_buddy_alloc_blocks(&mm, 0, ps, ps, ps, &tmp, 0);
+ KUNIT_EXPECT_NE_MSG(test, ret, 0, "alloc should fail when mm is full\n");
+
+ gpu_buddy_free_list(&mm, &blocks, 0);
+ KUNIT_EXPECT_EQ(test, mm.avail, (u64)mm_size);
+ gpu_buddy_fini(&mm);
+
+ /*
+ * Exact-range allocation of sub-ranges.
+ * Split the mm into four equal quarters and allocate each as an exact
+ * range. Validates splitting and non-overlapping exact allocations.
+ */
+ KUNIT_ASSERT_FALSE(test, gpu_buddy_init(&mm, mm_size, ps));
+
+ {
+ u32 quarter = mm_size / 4;
+ int i;
+
+ for (i = 0; i < 4; i++) {
+ u32 start = i * quarter;
+ u32 end = start + quarter;
+
+ ret = gpu_buddy_alloc_blocks(&mm, start, end, quarter, ps, &blocks, 0);
+ KUNIT_ASSERT_EQ_MSG(test, ret, 0,
+ "exact-range alloc quarter %d [%x, %x) failed\n",
+ i, start, end);
+ }
+ KUNIT_EXPECT_EQ(test, mm.avail, 0ULL);
+ gpu_buddy_free_list(&mm, &blocks, 0);
+ }
+
+ gpu_buddy_fini(&mm);
+
+ /*
+ * Minimum chunk-size exact range at various offsets.
+ * Allocate single-page exact ranges at the start, middle and end.
+ */
+ KUNIT_ASSERT_FALSE(test, gpu_buddy_init(&mm, mm_size, ps));
+
+ ret = gpu_buddy_alloc_blocks(&mm, 0, ps, ps, ps, &blocks, 0);
+ KUNIT_ASSERT_EQ(test, ret, 0);
+
+ ret = gpu_buddy_alloc_blocks(&mm, mm_size / 2, mm_size / 2 + ps, ps, ps, &blocks, 0);
+ KUNIT_ASSERT_EQ(test, ret, 0);
+
+ ret = gpu_buddy_alloc_blocks(&mm, mm_size - ps, mm_size, ps, ps, &blocks, 0);
+ KUNIT_ASSERT_EQ(test, ret, 0);
+
+ total = 0;
+ list_for_each_entry(block, &blocks, link)
+ total += (u32)gpu_buddy_block_size(&mm, block);
+ KUNIT_EXPECT_EQ(test, total, 3 * ps);
+
+ gpu_buddy_free_list(&mm, &blocks, 0);
+ gpu_buddy_fini(&mm);
+
+ /*
+ * Non power-of-two mm size (multiple roots).
+ * Exact-range allocations that span root boundaries must still work.
+ */
+ mm_size = SZ_4M + SZ_2M + SZ_1M; /* 7 MiB, three roots */
+
+ KUNIT_ASSERT_FALSE(test, gpu_buddy_init(&mm, mm_size, ps));
+ KUNIT_EXPECT_GT(test, mm.n_roots, 1U);
+
+ /* Allocate first 4M root exactly */
+ ret = gpu_buddy_alloc_blocks(&mm, 0, SZ_4M, SZ_4M, ps, &blocks, 0);
+ KUNIT_ASSERT_EQ(test, ret, 0);
+
+ /* Allocate second root (4M-6M) exactly */
+ ret = gpu_buddy_alloc_blocks(&mm, SZ_4M, SZ_4M + SZ_2M, SZ_2M, ps, &blocks, 0);
+ KUNIT_ASSERT_EQ(test, ret, 0);
+
+ /* Allocate third root (6M-7M) exactly */
+ ret = gpu_buddy_alloc_blocks(&mm, SZ_4M + SZ_2M, mm_size, SZ_1M, ps, &blocks, 0);
+ KUNIT_ASSERT_EQ(test, ret, 0);
+
+ KUNIT_EXPECT_EQ(test, mm.avail, 0ULL);
+ gpu_buddy_free_list(&mm, &blocks, 0);
+
+ /* Cross-root exact-range: the entire non-pot mm */
+ ret = gpu_buddy_alloc_blocks(&mm, 0, mm_size, mm_size, ps, &blocks, 0);
+ KUNIT_ASSERT_EQ(test, ret, 0);
+ KUNIT_EXPECT_EQ(test, mm.avail, 0ULL);
+
+ gpu_buddy_free_list(&mm, &blocks, 0);
+ gpu_buddy_fini(&mm);
+
+ /*
+ * Randomized exact-range allocations.
+ * Divide the mm into N random-sized, contiguous, page-aligned slices
+ * and allocate each as an exact range in random order.
+ */
+ mm_size = SZ_16M;
+ KUNIT_ASSERT_FALSE(test, gpu_buddy_init(&mm, mm_size, ps));
+
+ {
+#define N_RAND_RANGES 16
+ u32 ranges[N_RAND_RANGES + 1]; /* boundaries */
+ u32 order_arr[N_RAND_RANGES];
+ u32 remaining = mm_size;
+ int i;
+
+ ranges[0] = 0;
+ for (i = 0; i < N_RAND_RANGES - 1; i++) {
+ u32 max_chunk = remaining - (N_RAND_RANGES - 1 - i) * ps;
+ u32 sz = max(round_up(prandom_u32_state(&prng) % max_chunk, ps), ps);
+
+ ranges[i + 1] = ranges[i] + sz;
+ remaining -= sz;
+ }
+ ranges[N_RAND_RANGES] = mm_size;
+
+ /* Create a random order */
+ for (i = 0; i < N_RAND_RANGES; i++)
+ order_arr[i] = i;
+ for (i = N_RAND_RANGES - 1; i > 0; i--) {
+ u32 j = prandom_u32_state(&prng) % (i + 1);
+ u32 tmp_val = order_arr[i];
+
+ order_arr[i] = order_arr[j];
+ order_arr[j] = tmp_val;
+ }
+
+ for (i = 0; i < N_RAND_RANGES; i++) {
+ u32 idx = order_arr[i];
+ u32 start = ranges[idx];
+ u32 end = ranges[idx + 1];
+ u32 sz = end - start;
+
+ ret = gpu_buddy_alloc_blocks(&mm, start, end, sz, ps, &blocks, 0);
+ KUNIT_ASSERT_EQ_MSG(test, ret, 0,
+ "random exact-range [%x, %x) sz=%x failed\n",
+ start, end, sz);
+ }
+
+ KUNIT_EXPECT_EQ(test, mm.avail, 0ULL);
+ gpu_buddy_free_list(&mm, &blocks, 0);
+#undef N_RAND_RANGES
+ }
+
+ gpu_buddy_fini(&mm);
+
+ /*
+ * Negative case - partially allocated range.
+ * Allocate the first half, then try to exact-range allocate the full
+ * mm. This must fail because the first half is already occupied.
+ */
+ mm_size = SZ_16M;
+ KUNIT_ASSERT_FALSE(test, gpu_buddy_init(&mm, mm_size, ps));
+
+ ret = gpu_buddy_alloc_blocks(&mm, 0, mm_size / 2, mm_size / 2, ps, &blocks, 0);
+ KUNIT_ASSERT_EQ(test, ret, 0);
+
+ ret = gpu_buddy_alloc_blocks(&mm, 0, mm_size, mm_size, ps, &tmp, 0);
+ KUNIT_EXPECT_NE_MSG(test, ret, 0,
+ "exact-range alloc should fail when range is partially used\n");
+
+ /* Also try the already-occupied sub-range directly */
+ ret = gpu_buddy_alloc_blocks(&mm, 0, mm_size / 2, mm_size / 2, ps, &tmp, 0);
+ KUNIT_EXPECT_NE_MSG(test, ret, 0,
+ "double alloc of same exact range should fail\n");
+
+ /* The free second half should still be allocatable */
+ ret = gpu_buddy_alloc_blocks(&mm, mm_size / 2, mm_size, mm_size / 2, ps, &blocks, 0);
+ KUNIT_ASSERT_EQ(test, ret, 0);
+
+ KUNIT_EXPECT_EQ(test, mm.avail, 0ULL);
+ gpu_buddy_free_list(&mm, &blocks, 0);
+ gpu_buddy_fini(&mm);
+
+ /*
+ * Negative case - checkerboard partial allocation.
+ * Allocate every other page-sized chunk in a small mm, then try to
+ * exact-range allocate a range covering two pages (one allocated, one
+ * free). This must fail.
+ */
+ mm_size = SZ_64K;
+ KUNIT_ASSERT_FALSE(test, gpu_buddy_init(&mm, mm_size, ps));
+
+ {
+ u32 off;
+
+ for (off = 0; off < mm_size; off += 2 * ps) {
+ ret = gpu_buddy_alloc_blocks(&mm, off, off + ps, ps, ps, &blocks, 0);
+ KUNIT_ASSERT_EQ(test, ret, 0);
+ }
+
+ /* Try exact range over a pair [allocated, free] */
+ ret = gpu_buddy_alloc_blocks(&mm, 0, 2 * ps, 2 * ps, ps, &tmp, 0);
+ KUNIT_EXPECT_NE_MSG(test, ret, 0,
+ "exact-range over partially allocated pair should fail\n");
+
+ /* The free pages individually should still work */
+ ret = gpu_buddy_alloc_blocks(&mm, ps, 2 * ps, ps, ps, &blocks, 0);
+ KUNIT_ASSERT_EQ(test, ret, 0);
+
+ gpu_buddy_free_list(&mm, &blocks, 0);
+ }
+
+ gpu_buddy_fini(&mm);
+
+ /* Negative case - misaligned start/end/size */
+ mm_size = SZ_16M;
+ KUNIT_ASSERT_FALSE(test, gpu_buddy_init(&mm, mm_size, ps));
+
+ /* start not aligned to chunk_size */
+ ret = gpu_buddy_alloc_blocks(&mm, ps / 2, ps / 2 + ps, ps, ps, &tmp, 0);
+ KUNIT_EXPECT_NE(test, ret, 0);
+
+ /* size not aligned */
+ ret = gpu_buddy_alloc_blocks(&mm, 0, ps + 1, ps + 1, ps, &tmp, 0);
+ KUNIT_EXPECT_NE(test, ret, 0);
+
+ /* end exceeds mm size */
+ ret = gpu_buddy_alloc_blocks(&mm, mm_size, mm_size + ps, ps, ps, &tmp, 0);
+ KUNIT_EXPECT_NE(test, ret, 0);
+
+ gpu_buddy_fini(&mm);
+
+ /*
+ * Free and re-allocate the same exact range.
+ * This exercises merge-on-free followed by exact-range re-split.
+ */
+ mm_size = SZ_16M;
+ KUNIT_ASSERT_FALSE(test, gpu_buddy_init(&mm, mm_size, ps));
+
+ {
+ int i;
+
+ for (i = 0; i < 5; i++) {
+ ret = gpu_buddy_alloc_blocks(&mm, SZ_4M, SZ_4M + SZ_2M,
+ SZ_2M, ps, &blocks, 0);
+ KUNIT_ASSERT_EQ_MSG(test, ret, 0,
+ "re-alloc iteration %d failed\n", i);
+
+ total = 0;
+ list_for_each_entry(block, &blocks, link) {
+ u64 offset = gpu_buddy_block_offset(block);
+ u64 bsize = gpu_buddy_block_size(&mm, block);
+
+ KUNIT_EXPECT_GE(test, offset, (u64)SZ_4M);
+ KUNIT_EXPECT_LE(test, offset + bsize, (u64)(SZ_4M + SZ_2M));
+ total += (u32)bsize;
+ }
+ KUNIT_EXPECT_EQ(test, total, SZ_2M);
+
+ gpu_buddy_free_list(&mm, &blocks, 0);
+ }
+
+ KUNIT_EXPECT_EQ(test, mm.avail, (u64)mm_size);
+ }
+
+ gpu_buddy_fini(&mm);
+
+ /*
+ * Various power-of-two exact ranges within a large mm.
+ * Allocate non-overlapping power-of-two exact ranges at their natural
+ * alignment, validating that the allocator handles different orders.
+ */
+ mm_size = SZ_16M;
+ KUNIT_ASSERT_FALSE(test, gpu_buddy_init(&mm, mm_size, ps));
+
+ /* Allocate 4K at offset 0 */
+ ret = gpu_buddy_alloc_blocks(&mm, 0, SZ_4K, SZ_4K, ps, &blocks, 0);
+ KUNIT_ASSERT_EQ(test, ret, 0);
+
+ /* Allocate 64K at offset 64K */
+ ret = gpu_buddy_alloc_blocks(&mm, SZ_64K, SZ_64K + SZ_64K, SZ_64K, ps, &blocks, 0);
+ KUNIT_ASSERT_EQ(test, ret, 0);
+
+ /* Allocate 1M at offset 1M */
+ ret = gpu_buddy_alloc_blocks(&mm, SZ_1M, SZ_1M + SZ_1M, SZ_1M, ps, &blocks, 0);
+ KUNIT_ASSERT_EQ(test, ret, 0);
+
+ /* Allocate 4M at offset 4M */
+ ret = gpu_buddy_alloc_blocks(&mm, SZ_4M, SZ_4M + SZ_4M, SZ_4M, ps, &blocks, 0);
+ KUNIT_ASSERT_EQ(test, ret, 0);
+
+ total = 0;
+ list_for_each_entry(block, &blocks, link)
+ total += (u32)gpu_buddy_block_size(&mm, block);
+ KUNIT_EXPECT_EQ(test, total, SZ_4K + SZ_64K + SZ_1M + SZ_4M);
+
+ gpu_buddy_free_list(&mm, &blocks, 0);
+ gpu_buddy_fini(&mm);
+}
+
static void gpu_test_buddy_alloc_clear(struct kunit *test)
{
unsigned long n_pages, total, i = 0;
@@ -909,9 +1399,12 @@ static struct kunit_case gpu_buddy_tests[] = {
KUNIT_CASE(gpu_test_buddy_alloc_pathological),
KUNIT_CASE(gpu_test_buddy_alloc_contiguous),
KUNIT_CASE(gpu_test_buddy_alloc_clear),
+ KUNIT_CASE(gpu_test_buddy_alloc_range),
KUNIT_CASE(gpu_test_buddy_alloc_range_bias),
KUNIT_CASE_SLOW(gpu_test_buddy_fragmentation_performance),
KUNIT_CASE(gpu_test_buddy_alloc_exceeds_max_order),
+ KUNIT_CASE(gpu_test_buddy_offset_aligned_allocation),
+ KUNIT_CASE(gpu_test_buddy_subtree_offset_alignment_stress),
{}
};
diff --git a/include/drm/drm_fb_helper.h b/include/drm/drm_fb_helper.h
index 15274b8a1d97..bf391903443d 100644
--- a/include/drm/drm_fb_helper.h
+++ b/include/drm/drm_fb_helper.h
@@ -271,6 +271,8 @@ int drm_fb_helper_ioctl(struct fb_info *info, unsigned int cmd,
int drm_fb_helper_hotplug_event(struct drm_fb_helper *fb_helper);
int drm_fb_helper_initial_config(struct drm_fb_helper *fb_helper);
+bool drm_fb_helper_gem_is_fb(const struct drm_fb_helper *fb_helper,
+ const struct drm_gem_object *obj);
#endif
#endif
diff --git a/include/drm/intel/display_parent_interface.h b/include/drm/intel/display_parent_interface.h
index ce946859a3a9..97ec94a2e749 100644
--- a/include/drm/intel/display_parent_interface.h
+++ b/include/drm/intel/display_parent_interface.h
@@ -9,19 +9,66 @@
struct dma_fence;
struct drm_crtc;
struct drm_device;
+struct drm_file;
struct drm_framebuffer;
struct drm_gem_object;
+struct drm_mode_fb_cmd2;
struct drm_plane_state;
struct drm_scanout_buffer;
struct i915_vma;
+struct intel_dpt;
+struct intel_dsb_buffer;
+struct intel_frontbuffer;
struct intel_hdcp_gsc_context;
struct intel_initial_plane_config;
struct intel_panic;
struct intel_stolen_node;
struct ref_tracker;
+struct seq_file;
+struct vm_area_struct;
/* Keep struct definitions sorted */
+struct intel_display_bo_interface {
+ bool (*is_tiled)(struct drm_gem_object *obj); /* Optional */
+ bool (*is_userptr)(struct drm_gem_object *obj); /* Optional */
+ bool (*is_shmem)(struct drm_gem_object *obj); /* Optional */
+ bool (*is_protected)(struct drm_gem_object *obj);
+ int (*key_check)(struct drm_gem_object *obj);
+ int (*fb_mmap)(struct drm_gem_object *obj, struct vm_area_struct *vma);
+ int (*read_from_page)(struct drm_gem_object *obj, u64 offset, void *dst, int size);
+ void (*describe)(struct seq_file *m, struct drm_gem_object *obj); /* Optional */
+ int (*framebuffer_init)(struct drm_gem_object *obj, struct drm_mode_fb_cmd2 *mode_cmd);
+ void (*framebuffer_fini)(struct drm_gem_object *obj);
+ struct drm_gem_object *(*framebuffer_lookup)(struct drm_device *drm,
+ struct drm_file *filp,
+ const struct drm_mode_fb_cmd2 *user_mode_cmd);
+};
+
+struct intel_display_dpt_interface {
+ struct intel_dpt *(*create)(struct drm_gem_object *obj, size_t size);
+ void (*destroy)(struct intel_dpt *dpt);
+ void (*suspend)(struct intel_dpt *dpt);
+ void (*resume)(struct intel_dpt *dpt);
+};
+
+struct intel_display_dsb_interface {
+ u32 (*ggtt_offset)(struct intel_dsb_buffer *dsb_buf);
+ void (*write)(struct intel_dsb_buffer *dsb_buf, u32 idx, u32 val);
+ u32 (*read)(struct intel_dsb_buffer *dsb_buf, u32 idx);
+ void (*fill)(struct intel_dsb_buffer *dsb_buf, u32 idx, u32 val, size_t size);
+ struct intel_dsb_buffer *(*create)(struct drm_device *drm, size_t size);
+ void (*cleanup)(struct intel_dsb_buffer *dsb_buf);
+ void (*flush_map)(struct intel_dsb_buffer *dsb_buf);
+};
+
+struct intel_display_frontbuffer_interface {
+ struct intel_frontbuffer *(*get)(struct drm_gem_object *obj);
+ void (*ref)(struct intel_frontbuffer *front);
+ void (*put)(struct intel_frontbuffer *front);
+ void (*flush_for_display)(struct intel_frontbuffer *front);
+};
+
struct intel_display_hdcp_interface {
ssize_t (*gsc_msg_send)(struct intel_hdcp_gsc_context *gsc_context,
void *msg_in, size_t msg_in_len,
@@ -44,6 +91,35 @@ struct intel_display_irq_interface {
void (*synchronize)(struct drm_device *drm);
};
+struct intel_display_overlay_interface {
+ bool (*is_active)(struct drm_device *drm);
+
+ int (*overlay_on)(struct drm_device *drm,
+ u32 frontbuffer_bits);
+ int (*overlay_continue)(struct drm_device *drm,
+ struct i915_vma *vma,
+ bool load_polyphase_filter);
+ int (*overlay_off)(struct drm_device *drm);
+ int (*recover_from_interrupt)(struct drm_device *drm);
+ int (*release_old_vid)(struct drm_device *drm);
+
+ void (*reset)(struct drm_device *drm);
+
+ struct i915_vma *(*pin_fb)(struct drm_device *drm,
+ struct drm_gem_object *obj,
+ u32 *offset);
+ void (*unpin_fb)(struct drm_device *drm,
+ struct i915_vma *vma);
+
+ struct drm_gem_object *(*obj_lookup)(struct drm_device *drm,
+ struct drm_file *filp,
+ u32 handle);
+
+ void __iomem *(*setup)(struct drm_device *drm,
+ bool needs_physical);
+ void (*cleanup)(struct drm_device *drm);
+};
+
struct intel_display_panic_interface {
struct intel_panic *(*alloc)(void);
int (*setup)(struct intel_panic *panic, struct drm_scanout_buffer *sb);
@@ -55,6 +131,13 @@ struct intel_display_pc8_interface {
void (*unblock)(struct drm_device *drm);
};
+struct intel_display_pcode_interface {
+ int (*read)(struct drm_device *drm, u32 mbox, u32 *val, u32 *val1);
+ int (*write)(struct drm_device *drm, u32 mbox, u32 val, int timeout_ms);
+ int (*request)(struct drm_device *drm, u32 mbox, u32 request,
+ u32 reply_mask, u32 reply, int timeout_base_ms);
+};
+
struct intel_display_rpm_interface {
struct ref_tracker *(*get)(const struct drm_device *drm);
struct ref_tracker *(*get_raw)(const struct drm_device *drm);
@@ -93,6 +176,10 @@ struct intel_display_stolen_interface {
void (*node_free)(const struct intel_stolen_node *node);
};
+struct intel_display_vma_interface {
+ int (*fence_id)(const struct i915_vma *vma);
+};
+
/**
* struct intel_display_parent_interface - services parent driver provides to display
*
@@ -106,6 +193,18 @@ struct intel_display_stolen_interface {
* check the optional pointers.
*/
struct intel_display_parent_interface {
+ /** @bo: BO interface */
+ const struct intel_display_bo_interface *bo;
+
+ /** @dpt: DPT interface. Optional. */
+ const struct intel_display_dpt_interface *dpt;
+
+ /** @dsb: DSB buffer interface */
+ const struct intel_display_dsb_interface *dsb;
+
+ /** @frontbuffer: Frontbuffer interface */
+ const struct intel_display_frontbuffer_interface *frontbuffer;
+
/** @hdcp: HDCP GSC interface */
const struct intel_display_hdcp_interface *hdcp;
@@ -118,9 +217,15 @@ struct intel_display_parent_interface {
/** @panic: Panic interface */
const struct intel_display_panic_interface *panic;
+ /** @overlay: Overlay. Optional. */
+ const struct intel_display_overlay_interface *overlay;
+
/** @pc8: PC8 interface. Optional. */
const struct intel_display_pc8_interface *pc8;
+ /** @pcode: Pcode interface */
+ const struct intel_display_pcode_interface *pcode;
+
/** @rpm: Runtime PM functions */
const struct intel_display_rpm_interface *rpm;
@@ -130,6 +235,9 @@ struct intel_display_parent_interface {
/** @stolen: Stolen memory. */
const struct intel_display_stolen_interface *stolen;
+ /** @vma: VMA interface. Optional. */
+ const struct intel_display_vma_interface *vma;
+
/* Generic independent functions */
struct {
/** @fence_priority_display: Set display priority. Optional. */
diff --git a/include/drm/intel/i915_drm.h b/include/drm/intel/i915_drm.h
index adff68538484..1fdaabed1470 100644
--- a/include/drm/intel/i915_drm.h
+++ b/include/drm/intel/i915_drm.h
@@ -39,46 +39,46 @@ bool i915_gpu_turbo_disable(void);
extern struct resource intel_graphics_stolen_res;
/*
- * The Bridge device's PCI config space has information about the
- * fb aperture size and the amount of pre-reserved memory.
- * This is all handled in the intel-gtt.ko module. i915.ko only
- * cares about the vga bit for the vga arbiter.
+ * The bridge device's (device 0) PCI config space has information
+ * about the fb aperture size and the amount of pre-reserved memory.
*/
-#define INTEL_GMCH_CTRL 0x52
-#define INTEL_GMCH_VGA_DISABLE (1 << 1)
+
+/* device 2 has a read-only mirror */
#define SNB_GMCH_CTRL 0x50
-#define SNB_GMCH_GGMS_SHIFT 8 /* GTT Graphics Memory Size */
-#define SNB_GMCH_GGMS_MASK 0x3
-#define SNB_GMCH_GMS_SHIFT 3 /* Graphics Mode Select */
-#define SNB_GMCH_GMS_MASK 0x1f
-#define BDW_GMCH_GGMS_SHIFT 6
-#define BDW_GMCH_GGMS_MASK 0x3
-#define BDW_GMCH_GMS_SHIFT 8
-#define BDW_GMCH_GMS_MASK 0xff
+#define SNB_GMCH_GGMS_SHIFT 8 /* GTT Graphics Memory Size */
+#define SNB_GMCH_GGMS_MASK 0x3
+#define SNB_GMCH_GMS_SHIFT 3 /* Graphics Mode Select */
+#define SNB_GMCH_GMS_MASK 0x1f
+#define BDW_GMCH_GGMS_SHIFT 6
+#define BDW_GMCH_GGMS_MASK 0x3
+#define BDW_GMCH_GMS_SHIFT 8
+#define BDW_GMCH_GMS_MASK 0xff
+/* device 2 has a read-only mirror from i85x/i865 onwards */
#define I830_GMCH_CTRL 0x52
+#define I830_GMCH_GMS_MASK (0x7 << 4)
+#define I830_GMCH_GMS_LOCAL (0x1 << 4)
+#define I830_GMCH_GMS_STOLEN_512 (0x2 << 4)
+#define I830_GMCH_GMS_STOLEN_1024 (0x3 << 4)
+#define I830_GMCH_GMS_STOLEN_8192 (0x4 << 4)
+#define I855_GMCH_GMS_MASK (0xF << 4)
+#define I855_GMCH_GMS_STOLEN_0M (0x0 << 4)
+#define I855_GMCH_GMS_STOLEN_1M (0x1 << 4)
+#define I855_GMCH_GMS_STOLEN_4M (0x2 << 4)
+#define I855_GMCH_GMS_STOLEN_8M (0x3 << 4)
+#define I855_GMCH_GMS_STOLEN_16M (0x4 << 4)
+#define I855_GMCH_GMS_STOLEN_32M (0x5 << 4)
+#define I915_GMCH_GMS_STOLEN_48M (0x6 << 4)
+#define I915_GMCH_GMS_STOLEN_64M (0x7 << 4)
+#define G33_GMCH_GMS_STOLEN_128M (0x8 << 4)
+#define G33_GMCH_GMS_STOLEN_256M (0x9 << 4)
+#define INTEL_GMCH_GMS_STOLEN_96M (0xa << 4)
+#define INTEL_GMCH_GMS_STOLEN_160M (0xb << 4)
+#define INTEL_GMCH_GMS_STOLEN_224M (0xc << 4)
+#define INTEL_GMCH_GMS_STOLEN_352M (0xd << 4)
-#define I830_GMCH_GMS_MASK 0x70
-#define I830_GMCH_GMS_LOCAL 0x10
-#define I830_GMCH_GMS_STOLEN_512 0x20
-#define I830_GMCH_GMS_STOLEN_1024 0x30
-#define I830_GMCH_GMS_STOLEN_8192 0x40
-
-#define I855_GMCH_GMS_MASK 0xF0
-#define I855_GMCH_GMS_STOLEN_0M 0x0
-#define I855_GMCH_GMS_STOLEN_1M (0x1 << 4)
-#define I855_GMCH_GMS_STOLEN_4M (0x2 << 4)
-#define I855_GMCH_GMS_STOLEN_8M (0x3 << 4)
-#define I855_GMCH_GMS_STOLEN_16M (0x4 << 4)
-#define I855_GMCH_GMS_STOLEN_32M (0x5 << 4)
-#define I915_GMCH_GMS_STOLEN_48M (0x6 << 4)
-#define I915_GMCH_GMS_STOLEN_64M (0x7 << 4)
-#define G33_GMCH_GMS_STOLEN_128M (0x8 << 4)
-#define G33_GMCH_GMS_STOLEN_256M (0x9 << 4)
-#define INTEL_GMCH_GMS_STOLEN_96M (0xa << 4)
-#define INTEL_GMCH_GMS_STOLEN_160M (0xb << 4)
-#define INTEL_GMCH_GMS_STOLEN_224M (0xc << 4)
-#define INTEL_GMCH_GMS_STOLEN_352M (0xd << 4)
+/* valid for both I830_GMCH_CTRL and SNB_GMCH_CTRL */
+#define INTEL_GMCH_VGA_DISABLE (1 << 1)
#define I830_DRB3 0x63
#define I85X_DRB3 0x43
@@ -87,12 +87,12 @@ extern struct resource intel_graphics_stolen_res;
#define I830_ESMRAMC 0x91
#define I845_ESMRAMC 0x9e
#define I85X_ESMRAMC 0x61
-#define TSEG_ENABLE (1 << 0)
-#define I830_TSEG_SIZE_512K (0 << 1)
-#define I830_TSEG_SIZE_1M (1 << 1)
-#define I845_TSEG_SIZE_MASK (3 << 1)
-#define I845_TSEG_SIZE_512K (2 << 1)
-#define I845_TSEG_SIZE_1M (3 << 1)
+#define TSEG_ENABLE (1 << 0)
+#define I830_TSEG_SIZE_512K (0 << 1)
+#define I830_TSEG_SIZE_1M (1 << 1)
+#define I845_TSEG_SIZE_MASK (3 << 1)
+#define I845_TSEG_SIZE_512K (2 << 1)
+#define I845_TSEG_SIZE_1M (3 << 1)
#define INTEL_BSM 0x5c
#define INTEL_GEN11_BSM_DW0 0xc0
diff --git a/include/drm/intel/intel_gmd_interrupt_regs.h b/include/drm/intel/intel_gmd_interrupt_regs.h
new file mode 100644
index 000000000000..ce66c4151e76
--- /dev/null
+++ b/include/drm/intel/intel_gmd_interrupt_regs.h
@@ -0,0 +1,92 @@
+/* SPDX-License-Identifier: MIT */
+/* Copyright © 2026 Intel Corporation */
+
+#ifndef _INTEL_GMD_INTERRUPT_REGS_H_
+#define _INTEL_GMD_INTERRUPT_REGS_H_
+
+#define I915_PM_INTERRUPT (1 << 31)
+#define I915_ISP_INTERRUPT (1 << 22)
+#define I915_LPE_PIPE_B_INTERRUPT (1 << 21)
+#define I915_LPE_PIPE_A_INTERRUPT (1 << 20)
+#define I915_MIPIC_INTERRUPT (1 << 19)
+#define I915_MIPIA_INTERRUPT (1 << 18)
+#define I915_PIPE_CONTROL_NOTIFY_INTERRUPT (1 << 18)
+#define I915_DISPLAY_PORT_INTERRUPT (1 << 17)
+#define I915_DISPLAY_PIPE_C_HBLANK_INTERRUPT (1 << 16)
+#define I915_MASTER_ERROR_INTERRUPT (1 << 15)
+#define I915_DISPLAY_PIPE_B_HBLANK_INTERRUPT (1 << 14)
+#define I915_GMCH_THERMAL_SENSOR_EVENT_INTERRUPT (1 << 14) /* p-state */
+#define I915_DISPLAY_PIPE_A_HBLANK_INTERRUPT (1 << 13)
+#define I915_HWB_OOM_INTERRUPT (1 << 13)
+#define I915_LPE_PIPE_C_INTERRUPT (1 << 12)
+#define I915_SYNC_STATUS_INTERRUPT (1 << 12)
+#define I915_MISC_INTERRUPT (1 << 11)
+#define I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT (1 << 11)
+#define I915_DISPLAY_PIPE_C_VBLANK_INTERRUPT (1 << 10)
+#define I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT (1 << 10)
+#define I915_DISPLAY_PIPE_C_EVENT_INTERRUPT (1 << 9)
+#define I915_OVERLAY_PLANE_FLIP_PENDING_INTERRUPT (1 << 9)
+#define I915_DISPLAY_PIPE_C_DPBM_INTERRUPT (1 << 8)
+#define I915_DISPLAY_PLANE_C_FLIP_PENDING_INTERRUPT (1 << 8)
+#define I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT (1 << 7)
+#define I915_DISPLAY_PIPE_A_EVENT_INTERRUPT (1 << 6)
+#define I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT (1 << 5)
+#define I915_DISPLAY_PIPE_B_EVENT_INTERRUPT (1 << 4)
+#define I915_DISPLAY_PIPE_A_DPBM_INTERRUPT (1 << 3)
+#define I915_DISPLAY_PIPE_B_DPBM_INTERRUPT (1 << 2)
+#define I915_DEBUG_INTERRUPT (1 << 2)
+#define I915_WINVALID_INTERRUPT (1 << 1)
+#define I915_USER_INTERRUPT (1 << 1)
+#define I915_ASLE_INTERRUPT (1 << 0)
+#define I915_BSD_USER_INTERRUPT (1 << 25)
+
+#define GEN8_MASTER_IRQ _MMIO(0x44200)
+#define GEN8_MASTER_IRQ_CONTROL (1 << 31)
+#define GEN8_PCU_IRQ (1 << 30)
+#define GEN8_DE_PCH_IRQ (1 << 23)
+#define GEN8_DE_MISC_IRQ (1 << 22)
+#define GEN8_DE_PORT_IRQ (1 << 20)
+#define GEN8_DE_PIPE_C_IRQ (1 << 18)
+#define GEN8_DE_PIPE_B_IRQ (1 << 17)
+#define GEN8_DE_PIPE_A_IRQ (1 << 16)
+#define GEN8_DE_PIPE_IRQ(pipe) (1 << (16 + (pipe)))
+#define GEN8_GT_VECS_IRQ (1 << 6)
+#define GEN8_GT_GUC_IRQ (1 << 5)
+#define GEN8_GT_PM_IRQ (1 << 4)
+#define GEN8_GT_VCS1_IRQ (1 << 3) /* NB: VCS2 in bspec! */
+#define GEN8_GT_VCS0_IRQ (1 << 2) /* NB: VCS1 in bspec! */
+#define GEN8_GT_BCS_IRQ (1 << 1)
+#define GEN8_GT_RCS_IRQ (1 << 0)
+
+#define GEN11_GU_MISC_ISR _MMIO(0x444f0)
+#define GEN11_GU_MISC_IMR _MMIO(0x444f4)
+#define GEN11_GU_MISC_IIR _MMIO(0x444f8)
+#define GEN11_GU_MISC_IER _MMIO(0x444fc)
+#define GEN11_GU_MISC_GSE (1 << 27)
+
+#define GEN11_GU_MISC_IRQ_REGS I915_IRQ_REGS(GEN11_GU_MISC_IMR, \
+ GEN11_GU_MISC_IER, \
+ GEN11_GU_MISC_IIR)
+
+#define GEN11_GFX_MSTR_IRQ _MMIO(0x190010)
+#define GEN11_MASTER_IRQ (1 << 31)
+#define GEN11_PCU_IRQ (1 << 30)
+#define GEN11_GU_MISC_IRQ (1 << 29)
+#define GEN11_DISPLAY_IRQ (1 << 16)
+#define GEN11_GT_DW_IRQ(x) (1 << (x))
+#define GEN11_GT_DW1_IRQ (1 << 1)
+#define GEN11_GT_DW0_IRQ (1 << 0)
+
+#define SCPD0 _MMIO(0x209c) /* 915+ only */
+#define SCPD_FBC_IGNORE_3D (1 << 6)
+#define CSTATE_RENDER_CLOCK_GATE_DISABLE (1 << 5)
+
+#define VLV_IIR_RW _MMIO(VLV_DISPLAY_BASE + 0x2084)
+#define VLV_IER _MMIO(VLV_DISPLAY_BASE + 0x20a0)
+#define VLV_IIR _MMIO(VLV_DISPLAY_BASE + 0x20a4)
+#define VLV_IMR _MMIO(VLV_DISPLAY_BASE + 0x20a8)
+#define VLV_ISR _MMIO(VLV_DISPLAY_BASE + 0x20ac)
+#define VLV_PCBR _MMIO(VLV_DISPLAY_BASE + 0x2120)
+#define VLV_PCBR_ADDR_SHIFT 12
+
+#endif
diff --git a/include/drm/intel/intel_gmd_misc_regs.h b/include/drm/intel/intel_gmd_misc_regs.h
new file mode 100644
index 000000000000..763d7711f21c
--- /dev/null
+++ b/include/drm/intel/intel_gmd_misc_regs.h
@@ -0,0 +1,21 @@
+/* SPDX-License-Identifier: MIT */
+/* Copyright © 2026 Intel Corporation */
+
+#ifndef _INTEL_GMD_MISC_REGS_H_
+#define _INTEL_GMD_MISC_REGS_H_
+
+#define DISP_ARB_CTL _MMIO(0x45000)
+#define DISP_FBC_MEMORY_WAKE REG_BIT(31)
+#define DISP_TILE_SURFACE_SWIZZLING REG_BIT(13)
+#define DISP_FBC_WM_DIS REG_BIT(15)
+
+#define INSTPM _MMIO(0x20c0)
+#define INSTPM_SELF_EN (1 << 12) /* 915GM only */
+#define INSTPM_AGPBUSY_INT_EN (1 << 11) /* gen3: when disabled, pending interrupts
+ will not assert AGPBUSY# and will only
+ be delivered when out of C3. */
+#define INSTPM_FORCE_ORDERING (1 << 7) /* GEN6+ */
+#define INSTPM_TLB_INVALIDATE (1 << 9)
+#define INSTPM_SYNC_FLUSH (1 << 5)
+
+#endif
diff --git a/include/drm/intel/intel_pcode_regs.h b/include/drm/intel/intel_pcode_regs.h
new file mode 100644
index 000000000000..db989ee7c488
--- /dev/null
+++ b/include/drm/intel/intel_pcode_regs.h
@@ -0,0 +1,108 @@
+/* SPDX-License-Identifier: MIT */
+/* Copyright © 2026 Intel Corporation */
+
+#ifndef _INTEL_PCODE_REGS_H_
+#define _INTEL_PCODE_REGS_H_
+
+#define GEN6_PCODE_MAILBOX _MMIO(0x138124)
+#define GEN6_PCODE_READY (1 << 31)
+#define GEN6_PCODE_MB_PARAM2 REG_GENMASK(23, 16)
+#define GEN6_PCODE_MB_PARAM1 REG_GENMASK(15, 8)
+#define GEN6_PCODE_MB_COMMAND REG_GENMASK(7, 0)
+#define GEN6_PCODE_ERROR_MASK 0xFF
+#define GEN6_PCODE_SUCCESS 0x0
+#define GEN6_PCODE_ILLEGAL_CMD 0x1
+#define GEN6_PCODE_MIN_FREQ_TABLE_GT_RATIO_OUT_OF_RANGE 0x2
+#define GEN6_PCODE_TIMEOUT 0x3
+#define GEN6_PCODE_UNIMPLEMENTED_CMD 0xFF
+#define GEN7_PCODE_TIMEOUT 0x2
+#define GEN7_PCODE_ILLEGAL_DATA 0x3
+#define GEN11_PCODE_ILLEGAL_SUBCOMMAND 0x4
+#define GEN11_PCODE_LOCKED 0x6
+#define GEN11_PCODE_REJECTED 0x11
+#define GEN7_PCODE_MIN_FREQ_TABLE_GT_RATIO_OUT_OF_RANGE 0x10
+#define GEN6_PCODE_WRITE_RC6VIDS 0x4
+#define GEN6_PCODE_READ_RC6VIDS 0x5
+#define GEN6_ENCODE_RC6_VID(mv) (((mv) - 245) / 5)
+#define GEN6_DECODE_RC6_VID(vids) (((vids) * 5) + 245)
+#define BDW_PCODE_DISPLAY_FREQ_CHANGE_REQ 0x18
+#define GEN9_PCODE_READ_MEM_LATENCY 0x6
+#define GEN9_MEM_LATENCY_LEVEL_3_7_MASK REG_GENMASK(31, 24)
+#define GEN9_MEM_LATENCY_LEVEL_2_6_MASK REG_GENMASK(23, 16)
+#define GEN9_MEM_LATENCY_LEVEL_1_5_MASK REG_GENMASK(15, 8)
+#define GEN9_MEM_LATENCY_LEVEL_0_4_MASK REG_GENMASK(7, 0)
+#define SKL_PCODE_LOAD_HDCP_KEYS 0x5
+#define SKL_PCODE_CDCLK_CONTROL 0x7
+#define SKL_CDCLK_PREPARE_FOR_CHANGE 0x3
+#define SKL_CDCLK_READY_FOR_CHANGE 0x1
+#define GEN6_PCODE_WRITE_MIN_FREQ_TABLE 0x8
+#define GEN6_PCODE_READ_MIN_FREQ_TABLE 0x9
+#define GEN6_READ_OC_PARAMS 0xc
+#define ICL_PCODE_MEM_SUBSYSYSTEM_INFO 0xd
+#define ICL_PCODE_MEM_SS_READ_GLOBAL_INFO (0x0 << 8)
+#define ICL_PCODE_MEM_SS_READ_QGV_POINT_INFO(point) (((point) << 16) | (0x1 << 8))
+#define ADL_PCODE_MEM_SS_READ_PSF_GV_INFO ((0) | (0x2 << 8))
+#define DISPLAY_TO_PCODE_CDCLK_MAX 0x28D
+#define DISPLAY_TO_PCODE_VOLTAGE_MASK REG_GENMASK(1, 0)
+#define DISPLAY_TO_PCODE_VOLTAGE_MAX DISPLAY_TO_PCODE_VOLTAGE_MASK
+#define DISPLAY_TO_PCODE_CDCLK_VALID REG_BIT(27)
+#define DISPLAY_TO_PCODE_PIPE_COUNT_VALID REG_BIT(31)
+#define DISPLAY_TO_PCODE_CDCLK_MASK REG_GENMASK(25, 16)
+#define DISPLAY_TO_PCODE_PIPE_COUNT_MASK REG_GENMASK(30, 28)
+#define DISPLAY_TO_PCODE_CDCLK(x) REG_FIELD_PREP(DISPLAY_TO_PCODE_CDCLK_MASK, (x))
+#define DISPLAY_TO_PCODE_PIPE_COUNT(x) REG_FIELD_PREP(DISPLAY_TO_PCODE_PIPE_COUNT_MASK, (x))
+#define DISPLAY_TO_PCODE_VOLTAGE(x) REG_FIELD_PREP(DISPLAY_TO_PCODE_VOLTAGE_MASK, (x))
+#define DISPLAY_TO_PCODE_UPDATE_MASK(cdclk, num_pipes, voltage_level) \
+ ((DISPLAY_TO_PCODE_CDCLK(cdclk)) | \
+ (DISPLAY_TO_PCODE_PIPE_COUNT(num_pipes)) | \
+ (DISPLAY_TO_PCODE_VOLTAGE(voltage_level)))
+#define ICL_PCODE_SAGV_DE_MEM_SS_CONFIG 0xe
+#define ICL_PCODE_REP_QGV_MASK REG_GENMASK(1, 0)
+#define ICL_PCODE_REP_QGV_SAFE REG_FIELD_PREP(ICL_PCODE_REP_QGV_MASK, 0)
+#define ICL_PCODE_REP_QGV_POLL REG_FIELD_PREP(ICL_PCODE_REP_QGV_MASK, 1)
+#define ICL_PCODE_REP_QGV_REJECTED REG_FIELD_PREP(ICL_PCODE_REP_QGV_MASK, 2)
+#define ADLS_PCODE_REP_PSF_MASK REG_GENMASK(3, 2)
+#define ADLS_PCODE_REP_PSF_SAFE REG_FIELD_PREP(ADLS_PCODE_REP_PSF_MASK, 0)
+#define ADLS_PCODE_REP_PSF_POLL REG_FIELD_PREP(ADLS_PCODE_REP_PSF_MASK, 1)
+#define ADLS_PCODE_REP_PSF_REJECTED REG_FIELD_PREP(ADLS_PCODE_REP_PSF_MASK, 2)
+#define ICL_PCODE_REQ_QGV_PT_MASK REG_GENMASK(7, 0)
+#define ICL_PCODE_REQ_QGV_PT(x) REG_FIELD_PREP(ICL_PCODE_REQ_QGV_PT_MASK, (x))
+#define ADLS_PCODE_REQ_PSF_PT_MASK REG_GENMASK(10, 8)
+#define ADLS_PCODE_REQ_PSF_PT(x) REG_FIELD_PREP(ADLS_PCODE_REQ_PSF_PT_MASK, (x))
+#define GEN6_PCODE_READ_D_COMP 0x10
+#define GEN6_PCODE_WRITE_D_COMP 0x11
+#define ICL_PCODE_EXIT_TCCOLD 0x12
+#define HSW_PCODE_DE_WRITE_FREQ_REQ 0x17
+#define DISPLAY_IPS_CONTROL 0x19
+#define TGL_PCODE_TCCOLD 0x26
+#define TGL_PCODE_EXIT_TCCOLD_DATA_L_EXIT_FAILED REG_BIT(0)
+#define TGL_PCODE_EXIT_TCCOLD_DATA_L_BLOCK_REQ 0
+#define TGL_PCODE_EXIT_TCCOLD_DATA_L_UNBLOCK_REQ REG_BIT(0)
+/* See also IPS_CTL */
+#define IPS_PCODE_CONTROL (1 << 30)
+#define HSW_PCODE_DYNAMIC_DUTY_CYCLE_CONTROL 0x1A
+#define GEN9_PCODE_SAGV_CONTROL 0x21
+#define GEN9_SAGV_DISABLE 0x0
+#define GEN9_SAGV_IS_DISABLED 0x1
+#define GEN9_SAGV_ENABLE 0x3
+#define DG1_PCODE_STATUS 0x7E
+#define DG1_UNCORE_GET_INIT_STATUS 0x0
+#define DG1_UNCORE_INIT_STATUS_COMPLETE 0x1
+#define PCODE_POWER_SETUP 0x7C
+#define POWER_SETUP_SUBCOMMAND_READ_I1 0x4
+#define POWER_SETUP_SUBCOMMAND_WRITE_I1 0x5
+#define POWER_SETUP_I1_WATTS REG_BIT(31)
+#define POWER_SETUP_I1_SHIFT 6 /* 10.6 fixed point format */
+#define POWER_SETUP_I1_DATA_MASK REG_GENMASK(15, 0)
+#define POWER_SETUP_SUBCOMMAND_G8_ENABLE 0x6
+#define GEN12_PCODE_READ_SAGV_BLOCK_TIME_US 0x23
+#define XEHP_PCODE_FREQUENCY_CONFIG 0x6e /* pvc */
+/* XEHP_PCODE_FREQUENCY_CONFIG sub-commands (param1) */
+#define PCODE_MBOX_FC_SC_READ_FUSED_P0 0x0
+#define PCODE_MBOX_FC_SC_READ_FUSED_PN 0x1
+/* PCODE_MBOX_DOMAIN_* - mailbox domain IDs */
+/* XEHP_PCODE_FREQUENCY_CONFIG param2 */
+#define PCODE_MBOX_DOMAIN_NONE 0x0
+#define PCODE_MBOX_DOMAIN_MEDIAFF 0x3
+
+#endif
diff --git a/include/drm/intel/pick.h b/include/drm/intel/pick.h
new file mode 100644
index 000000000000..d976fab8f270
--- /dev/null
+++ b/include/drm/intel/pick.h
@@ -0,0 +1,51 @@
+/* SPDX-License-Identifier: MIT */
+/* Copyright © 2026 Intel Corporation */
+
+#ifndef _PICK_H_
+#define _PICK_H_
+
+/*
+ * Given the first two numbers __a and __b of arbitrarily many evenly spaced
+ * numbers, pick the 0-based __index'th value.
+ *
+ * Always prefer this over _PICK() if the numbers are evenly spaced.
+ */
+#define _PICK_EVEN(__index, __a, __b) ((__a) + (__index) * ((__b) - (__a)))
+
+/*
+ * Like _PICK_EVEN(), but supports 2 ranges of evenly spaced address offsets.
+ * @__c_index corresponds to the index in which the second range starts to be
+ * used. Using math interval notation, the first range is used for indexes [ 0,
+ * @__c_index), while the second range is used for [ @__c_index, ... ). Example:
+ *
+ * #define _FOO_A 0xf000
+ * #define _FOO_B 0xf004
+ * #define _FOO_C 0xf008
+ * #define _SUPER_FOO_A 0xa000
+ * #define _SUPER_FOO_B 0xa100
+ * #define FOO(x) _MMIO(_PICK_EVEN_2RANGES(x, 3, \
+ * _FOO_A, _FOO_B, \
+ * _SUPER_FOO_A, _SUPER_FOO_B))
+ *
+ * This expands to:
+ * 0: 0xf000,
+ * 1: 0xf004,
+ * 2: 0xf008,
+ * 3: 0xa000,
+ * 4: 0xa100,
+ * 5: 0xa200,
+ * ...
+ */
+#define _PICK_EVEN_2RANGES(__index, __c_index, __a, __b, __c, __d) \
+ (BUILD_BUG_ON_ZERO(!__is_constexpr(__c_index)) + \
+ ((__index) < (__c_index) ? _PICK_EVEN(__index, __a, __b) : \
+ _PICK_EVEN((__index) - (__c_index), __c, __d)))
+
+/*
+ * Given the arbitrary numbers in varargs, pick the 0-based __index'th number.
+ *
+ * Always prefer _PICK_EVEN() over this if the numbers are evenly spaced.
+ */
+#define _PICK(__index, ...) (((const u32 []){ __VA_ARGS__ })[__index])
+
+#endif
diff --git a/include/drm/intel/reg_bits.h b/include/drm/intel/reg_bits.h
new file mode 100644
index 000000000000..2a9066e1d808
--- /dev/null
+++ b/include/drm/intel/reg_bits.h
@@ -0,0 +1,139 @@
+/* SPDX-License-Identifier: MIT */
+/* Copyright © 2026 Intel Corporation */
+
+#ifndef _REG_BITS_H_
+#define _REG_BITS_H_
+
+#include <linux/bitfield.h>
+#include <linux/bits.h>
+
+/*
+ * Wrappers over the generic fixed width BIT_U*() and GENMASK_U*()
+ * implementations, for compatibility reasons with previous implementation.
+ */
+#define REG_GENMASK(high, low) GENMASK_U32(high, low)
+#define REG_GENMASK64(high, low) GENMASK_U64(high, low)
+#define REG_GENMASK16(high, low) GENMASK_U16(high, low)
+#define REG_GENMASK8(high, low) GENMASK_U8(high, low)
+
+#define REG_BIT(n) BIT_U32(n)
+#define REG_BIT64(n) BIT_U64(n)
+#define REG_BIT16(n) BIT_U16(n)
+#define REG_BIT8(n) BIT_U8(n)
+
+/*
+ * Local integer constant expression version of is_power_of_2().
+ */
+#define IS_POWER_OF_2(__x) ((__x) && (((__x) & ((__x) - 1)) == 0))
+
+/**
+ * REG_FIELD_PREP8() - Prepare a u8 bitfield value
+ * @__mask: shifted mask defining the field's length and position
+ * @__val: value to put in the field
+ *
+ * Local copy of FIELD_PREP() to generate an integer constant expression, force
+ * u8 and for consistency with REG_FIELD_GET8(), REG_BIT8() and REG_GENMASK8().
+ *
+ * @return: @__val masked and shifted into the field defined by @__mask.
+ */
+#define REG_FIELD_PREP8(__mask, __val) \
+ ((u8)((((typeof(__mask))(__val) << __bf_shf(__mask)) & (__mask)) + \
+ BUILD_BUG_ON_ZERO(!__is_constexpr(__mask)) + \
+ BUILD_BUG_ON_ZERO((__mask) == 0 || (__mask) > U8_MAX) + \
+ BUILD_BUG_ON_ZERO(!IS_POWER_OF_2((__mask) + (1ULL << __bf_shf(__mask)))) + \
+ BUILD_BUG_ON_ZERO(__builtin_choose_expr(__is_constexpr(__val), (~((__mask) >> __bf_shf(__mask)) & (__val)), 0))))
+
+/**
+ * REG_FIELD_PREP16() - Prepare a u16 bitfield value
+ * @__mask: shifted mask defining the field's length and position
+ * @__val: value to put in the field
+ *
+ * Local copy of FIELD_PREP() to generate an integer constant
+ * expression, force u16 and for consistency with
+ * REG_FIELD_GET16(), REG_BIT16() and REG_GENMASK16().
+ *
+ * @return: @__val masked and shifted into the field defined by @__mask.
+ */
+#define REG_FIELD_PREP16(__mask, __val) \
+ ((u16)((((typeof(__mask))(__val) << __bf_shf(__mask)) & (__mask)) + \
+ BUILD_BUG_ON_ZERO(!__is_constexpr(__mask)) + \
+ BUILD_BUG_ON_ZERO((__mask) == 0 || (__mask) > U16_MAX) + \
+ BUILD_BUG_ON_ZERO(!IS_POWER_OF_2((__mask) + (1ULL << __bf_shf(__mask)))) + \
+ BUILD_BUG_ON_ZERO(__builtin_choose_expr(__is_constexpr(__val), (~((__mask) >> __bf_shf(__mask)) & (__val)), 0))))
+
+/**
+ * REG_FIELD_PREP() - Prepare a u32 bitfield value
+ * @__mask: shifted mask defining the field's length and position
+ * @__val: value to put in the field
+ *
+ * Local copy of FIELD_PREP() to generate an integer constant expression, force
+ * u32 and for consistency with REG_FIELD_GET(), REG_BIT() and REG_GENMASK().
+ *
+ * @return: @__val masked and shifted into the field defined by @__mask.
+ */
+#define REG_FIELD_PREP(__mask, __val) \
+ ((u32)((((typeof(__mask))(__val) << __bf_shf(__mask)) & (__mask)) + \
+ BUILD_BUG_ON_ZERO(!__is_constexpr(__mask)) + \
+ BUILD_BUG_ON_ZERO((__mask) == 0 || (__mask) > U32_MAX) + \
+ BUILD_BUG_ON_ZERO(!IS_POWER_OF_2((__mask) + (1ULL << __bf_shf(__mask)))) + \
+ BUILD_BUG_ON_ZERO(__builtin_choose_expr(__is_constexpr(__val), (~((__mask) >> __bf_shf(__mask)) & (__val)), 0))))
+
+/**
+ * REG_FIELD_GET8() - Extract a u8 bitfield value
+ * @__mask: shifted mask defining the field's length and position
+ * @__val: value to extract the bitfield value from
+ *
+ * Local wrapper for FIELD_GET() to force u8 and for consistency with
+ * REG_FIELD_PREP(), REG_BIT() and REG_GENMASK().
+ *
+ * @return: Masked and shifted value of the field defined by @__mask in @__val.
+ */
+#define REG_FIELD_GET8(__mask, __val) ((u8)FIELD_GET(__mask, __val))
+
+/**
+ * REG_FIELD_GET() - Extract a u32 bitfield value
+ * @__mask: shifted mask defining the field's length and position
+ * @__val: value to extract the bitfield value from
+ *
+ * Local wrapper for FIELD_GET() to force u32 and for consistency with
+ * REG_FIELD_PREP(), REG_BIT() and REG_GENMASK().
+ *
+ * @return: Masked and shifted value of the field defined by @__mask in @__val.
+ */
+#define REG_FIELD_GET(__mask, __val) ((u32)FIELD_GET(__mask, __val))
+
+/**
+ * REG_FIELD_GET64() - Extract a u64 bitfield value
+ * @__mask: shifted mask defining the field's length and position
+ * @__val: value to extract the bitfield value from
+ *
+ * Local wrapper for FIELD_GET() to force u64 and for consistency with
+ * REG_GENMASK64().
+ *
+ * @return: Masked and shifted value of the field defined by @__mask in @__val.
+ */
+#define REG_FIELD_GET64(__mask, __val) ((u64)FIELD_GET(__mask, __val))
+
+/**
+ * REG_FIELD_MAX() - produce the maximum value representable by a field
+ * @__mask: shifted mask defining the field's length and position
+ *
+ * Local wrapper for FIELD_MAX() to return the maximum bit value that can
+ * be held in the field specified by @__mask, cast to u32 for consistency
+ * with other macros.
+ */
+#define REG_FIELD_MAX(__mask) ((u32)FIELD_MAX(__mask))
+
+#define REG_MASKED_FIELD(mask, value) \
+ (BUILD_BUG_ON_ZERO(__builtin_choose_expr(__builtin_constant_p(mask), (mask) & 0xffff0000, 0)) + \
+ BUILD_BUG_ON_ZERO(__builtin_choose_expr(__builtin_constant_p(value), (value) & 0xffff0000, 0)) + \
+ BUILD_BUG_ON_ZERO(__builtin_choose_expr(__builtin_constant_p(mask) && __builtin_constant_p(value), (value) & ~(mask), 0)) + \
+ ((mask) << 16 | (value)))
+
+#define REG_MASKED_FIELD_ENABLE(a) \
+ (__builtin_choose_expr(__builtin_constant_p(a), REG_MASKED_FIELD((a), (a)), ({ typeof(a) _a = (a); REG_MASKED_FIELD(_a, _a); })))
+
+#define REG_MASKED_FIELD_DISABLE(a) \
+ (REG_MASKED_FIELD((a), 0))
+
+#endif
diff --git a/include/linux/gpu_buddy.h b/include/linux/gpu_buddy.h
index f1fb6eff604a..5fa917ba5450 100644
--- a/include/linux/gpu_buddy.h
+++ b/include/linux/gpu_buddy.h
@@ -11,6 +11,7 @@
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/rbtree.h>
+#include <linux/rbtree_augmented.h>
/**
* GPU_BUDDY_RANGE_ALLOCATION - Allocate within a specific address range
@@ -128,6 +129,7 @@ struct gpu_buddy_block {
};
/* private: */
struct list_head tmp_link;
+ unsigned int subtree_max_alignment;
};
/* Order-zero must be at least SZ_4K */
diff --git a/include/linux/iopoll.h b/include/linux/iopoll.h
index bdd2e0652bc3..53edd69acb9b 100644
--- a/include/linux/iopoll.h
+++ b/include/linux/iopoll.h
@@ -159,7 +159,7 @@
*
* This macro does not rely on timekeeping. Hence it is safe to call even when
* timekeeping is suspended, at the expense of an underestimation of wall clock
- * time, which is rather minimal with a non-zero delay_us.
+ * time, which is rather minimal with a non-zero @delay_us.
*
* When available, you'll probably want to use one of the specialized
* macros defined below rather than this macro directly.
@@ -167,9 +167,9 @@
* Returns: 0 on success and -ETIMEDOUT upon a timeout. In either
* case, the last read value at @args is stored in @val.
*/
-#define read_poll_timeout_atomic(op, val, cond, sleep_us, timeout_us, \
- sleep_before_read, args...) \
- poll_timeout_us_atomic((val) = op(args), cond, sleep_us, timeout_us, sleep_before_read)
+#define read_poll_timeout_atomic(op, val, cond, delay_us, timeout_us, \
+ delay_before_read, args...) \
+ poll_timeout_us_atomic((val) = op(args), cond, delay_us, timeout_us, delay_before_read)
/**
* readx_poll_timeout - Periodically poll an address until a condition is met or a timeout occurs
diff --git a/include/uapi/drm/amdxdna_accel.h b/include/uapi/drm/amdxdna_accel.h
index 9c44db2b3dcd..5bd13f4435f5 100644
--- a/include/uapi/drm/amdxdna_accel.h
+++ b/include/uapi/drm/amdxdna_accel.h
@@ -353,7 +353,8 @@ struct amdxdna_drm_query_clock_metadata {
};
enum amdxdna_sensor_type {
- AMDXDNA_SENSOR_TYPE_POWER
+ AMDXDNA_SENSOR_TYPE_POWER,
+ AMDXDNA_SENSOR_TYPE_COLUMN_UTILIZATION
};
/**
diff --git a/include/video/vga.h b/include/video/vga.h
index 468764d6727a..2f13c371800b 100644
--- a/include/video/vga.h
+++ b/include/video/vga.h
@@ -46,6 +46,7 @@
#define VGA_MIS_R 0x3CC /* Misc Output Read Register */
#define VGA_MIS_W 0x3C2 /* Misc Output Write Register */
#define VGA_FTC_R 0x3CA /* Feature Control Read Register */
+#define VGA_IS0_R 0x3C2 /* Input Status Register 0 */
#define VGA_IS1_RC 0x3DA /* Input Status Register 1 - color emulation */
#define VGA_IS1_RM 0x3BA /* Input Status Register 1 - mono emulation */
#define VGA_PEL_D 0x3C9 /* PEL Data Register */